#include <inttypes.h>
#include "util/u_format.h"
-#include "util/u_hash.h"
+#include "util/crc32.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
-#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
-#ifdef USE_VC4_SIMULATOR
-#include "simpenrose/simpenrose.h"
-#endif
+#include "mesa/state_tracker/st_glsl_types.h"
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
+static void
+ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
static void
resize_qreg_array(struct vc4_compile *c,
(*regs)[i] = c->undef;
}
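+/* Issues a thread switch signal so the QPU's other fragment shader thread
+ * can run while this one waits for its texture fetch to complete.
+ */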
+static void
+ntq_emit_thrsw(struct vc4_compile *c)
+{
+ if (!c->fs_threaded)
+ return;
+
+ /* Always thread switch after each texture operation for now.
+ *
+ * We could do better by batching a bunch of texture fetches up and
+ * then doing one thread switch and collecting all their results
+ * afterward.
+ */
+ qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
+ c->undef, c->undef));
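+
+	/* Remember whether the switch happened at the top level, since the
+	 * last THRSW of a threaded shader must be outside of control flow.
+	 */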
+ c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
+}
+
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
- uint32_t offset = intr->const_index[0];
+ uint32_t offset = nir_intrinsic_base(intr);
struct vc4_compiler_ubo_range *range = NULL;
unsigned i;
for (i = 0; i < c->num_uniform_ranges; i++) {
qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
c->num_texture_samples++;
- return qir_TEX_RESULT(c);
-}
-nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
- enum quniform_contents contents)
-{
- nir_intrinsic_instr *intr =
- nir_intrinsic_instr_create(b->shader,
- nir_intrinsic_load_uniform);
- intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
- intr->num_components = 1;
- intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
- nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
- nir_builder_instr_insert(b, &intr->instr);
- return &intr->dest.ssa;
+ ntq_emit_thrsw(c);
+
+ return qir_TEX_RESULT(c);
}
nir_ssa_def *
{
switch (swiz) {
default:
- case UTIL_FORMAT_SWIZZLE_NONE:
+ case PIPE_SWIZZLE_NONE:
fprintf(stderr, "warning: unknown swizzle\n");
/* FALLTHROUGH */
- case UTIL_FORMAT_SWIZZLE_0:
+ case PIPE_SWIZZLE_0:
return nir_imm_float(b, 0.0);
- case UTIL_FORMAT_SWIZZLE_1:
+ case PIPE_SWIZZLE_1:
return nir_imm_float(b, 1.0);
- case UTIL_FORMAT_SWIZZLE_X:
- case UTIL_FORMAT_SWIZZLE_Y:
- case UTIL_FORMAT_SWIZZLE_Z:
- case UTIL_FORMAT_SWIZZLE_W:
+ case PIPE_SWIZZLE_X:
+ case PIPE_SWIZZLE_Y:
+ case PIPE_SWIZZLE_Z:
+ case PIPE_SWIZZLE_W:
return srcs[swiz];
}
}
return qregs;
}
+/**
+ * This function is responsible for getting QIR results into the associated
+ * storage for a NIR instruction.
+ *
+ * If it's a NIR SSA def, then we just set the associated hash table entry to
+ * the new result.
+ *
+ * If it's a NIR reg, then we need to update the existing qreg assigned to the
+ * NIR destination with the incoming value. To do that without introducing
+ * new MOVs, we require that the incoming qreg either be a uniform, or be
+ * SSA-defined by the previous QIR instruction in the block and rewritable by
+ * this function. That lets us sneak ahead and insert the SF flag beforehand
+ * (knowing that the previous instruction doesn't depend on flags) and rewrite
+ * its destination to be the NIR reg's destination.
+ */
+static void
+ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
+ struct qreg result)
+{
+ struct qinst *last_inst = NULL;
+ if (!list_empty(&c->cur_block->instructions))
+ last_inst = (struct qinst *)c->cur_block->instructions.prev;
+
+ assert(result.file == QFILE_UNIF ||
+ (result.file == QFILE_TEMP &&
+ last_inst && last_inst == c->defs[result.index]));
+
+ if (dest->is_ssa) {
+ assert(chan < dest->ssa.num_components);
+
+ struct qreg *qregs;
+ struct hash_entry *entry =
+ _mesa_hash_table_search(c->def_ht, &dest->ssa);
+
+ if (entry)
+ qregs = entry->data;
+ else
+ qregs = ntq_init_ssa_def(c, &dest->ssa);
+
+ qregs[chan] = result;
+ } else {
+ nir_register *reg = dest->reg.reg;
+ assert(dest->reg.base_offset == 0);
+ assert(reg->num_array_elems == 0);
+ struct hash_entry *entry =
+ _mesa_hash_table_search(c->def_ht, reg);
+ struct qreg *qregs = entry->data;
+
+ /* Insert a MOV if the source wasn't an SSA def in the
+ * previous instruction.
+ */
+ if (result.file == QFILE_UNIF) {
+ result = qir_MOV(c, result);
+ last_inst = c->defs[result.index];
+ }
+
+ /* We know they're both temps, so just rewrite index. */
+ c->defs[last_inst->dst.index] = NULL;
+ last_inst->dst.index = qregs[chan].index;
+
+ /* If we're in control flow, then make this update of the reg
+ * conditional on the execution mask.
+ */
+ if (c->execute.file != QFILE_NULL) {
+ /* Set the flags to the current exec mask. To insert
+ * the SF, we temporarily remove our SSA instruction.
+ */
+ list_del(&last_inst->link);
+ qir_SF(c, c->execute);
+ list_addtail(&last_inst->link,
+ &c->cur_block->instructions);
+
+ last_inst->cond = QPU_COND_ZS;
+ last_inst->cond_is_exec_mask = true;
+ }
+ }
+}
+
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
return r;
}
-static struct qreg
-qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
-{
- struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
- struct qreg high = qir_POW(c,
- qir_FMUL(c,
- qir_FADD(c,
- srgb,
- qir_uniform_f(c, 0.055)),
- qir_uniform_f(c, 1.0 / 1.055)),
- qir_uniform_f(c, 2.4));
-
- qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
- return qir_SEL(c, QPU_COND_NS, low, high);
-}
-
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
+ ntq_emit_thrsw(c);
+
struct qreg tex = qir_TEX_RESULT(c);
c->num_texture_samples++;
- struct qreg *dest = ntq_get_dest(c, &instr->dest);
enum pipe_format format = c->key->tex[unit].format;
if (util_format_is_depth_or_stencil(format)) {
struct qreg scaled = ntq_scale_depth_texture(c, tex);
for (int i = 0; i < 4; i++)
- dest[i] = scaled;
+ ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
} else {
for (int i = 0; i < 4; i++)
- dest[i] = qir_UNPACK_8_F(c, tex, i);
- }
-
- for (int i = 0; i < 4; i++) {
- if (c->tex_srgb_decode[unit] & (1 << i))
- dest[i] = qir_srgb_decode(c, dest[i]);
+ ntq_store_dest(c, &instr->dest, i,
+ qir_UNPACK_8_F(c, tex, i));
}
}
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
- struct qreg s, t, r, lod, proj, compare;
- bool is_txb = false, is_txl = false, has_proj = false;
+ struct qreg s, t, r, lod, compare;
+ bool is_txb = false, is_txl = false;
unsigned unit = instr->texture_index;
if (instr->op == nir_texop_txf) {
case nir_tex_src_comparitor:
compare = ntq_get_src(c, instr->src[i].src, 0);
break;
- case nir_tex_src_projector:
- proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
- s = qir_FMUL(c, s, proj);
- t = qir_FMUL(c, t, proj);
- has_proj = true;
- break;
default:
unreachable("unknown texture source");
}
}
+ if (c->stage != QSTAGE_FRAG && !is_txl) {
+ /* From the GLSL 1.20 spec:
+ *
+ * "If it is mip-mapped and running on the vertex shader,
+ * then the base texture is used."
+ */
+ is_txl = true;
+ lod = qir_uniform_ui(c, 0);
+ }
+
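+	/* A sampler view restricted to a single mipmap level becomes an
+	 * explicit-LOD fetch of that level.
+	 */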
+ if (c->key->tex[unit].force_first_level) {
+ lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
+ is_txl = true;
+ is_txb = false;
+ }
+
struct qreg texture_u[] = {
qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
- struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
- struct qreg rcp_ma = qir_RCP(c, ma);
- s = qir_FMUL(c, s, rcp_ma);
- t = qir_FMUL(c, t, rcp_ma);
- r = qir_FMUL(c, r, rcp_ma);
-
qir_TEX_R(c, r, texture_u[next_texture_u++]);
} else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
qir_TEX_S(c, s, texture_u[next_texture_u++]);
c->num_texture_samples++;
+
+ ntq_emit_thrsw(c);
+
struct qreg tex = qir_TEX_RESULT(c);
enum pipe_format format = c->key->tex[unit].format;
struct qreg u0 = qir_uniform_f(c, 0.0f);
struct qreg u1 = qir_uniform_f(c, 1.0f);
if (c->key->tex[unit].compare_mode) {
- if (has_proj)
- compare = qir_FMUL(c, compare, proj);
+ /* From the GL_ARB_shadow spec:
+ *
+ * "Let Dt (D subscript t) be the depth texture
+ * value, in the range [0, 1]. Let R be the
+ * interpolated texture coordinate clamped to the
+ * range [0, 1]."
+ */
+ compare = qir_SAT(c, compare);
switch (c->key->tex[unit].compare_func) {
case PIPE_FUNC_NEVER:
for (int i = 0; i < 4; i++)
dest[i] = qir_UNPACK_8_F(c, tex, i);
}
-
- for (int i = 0; i < 4; i++) {
- if (c->tex_srgb_decode[unit] & (1 << i))
- dest[i] = qir_srgb_decode(c, dest[i]);
- }
}
/**
struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
struct qreg diff = qir_FSUB(c, src, trunc);
qir_SF(c, diff);
- return qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
+ return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
+ qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
+ diff));
}
/**
*/
qir_SF(c, qir_FSUB(c, src, trunc));
- return qir_SEL(c, QPU_COND_NS,
- qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
+ return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
+ qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
+ trunc));
}
/**
*/
qir_SF(c, qir_FSUB(c, trunc, src));
- return qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
+ return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
+ qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
+ trunc));
}
static struct qreg
qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
- return t;
+ return qir_MOV(c, t);
}
static void
c->vattr_sizes[attr] = align(attr_size, 4);
for (int i = 0; i < align(attr_size, 4) / 4; i++) {
- struct qreg vpm = { QFILE_VPM, attr * 4 + i };
- c->inputs[attr * 4 + i] = qir_MOV(c, vpm);
+ c->inputs[attr * 4 + i] =
+ qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
c->num_inputs++;
}
}
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
- c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
- c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
+ c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
+ c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
c->inputs[attr * 4 + 2] =
qir_FMUL(c,
qir_ITOF(c, qir_FRAG_Z(c)),
if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
- struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
- *dest = qir_PACK_8888_F(c,
- ntq_get_src(c, instr->src[0].src,
- instr->src[0].swizzle[0]));
+ struct qreg rep = ntq_get_src(c,
+ instr->src[0].src,
+ instr->src[0].swizzle[0]);
+ ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
return;
}
qir_PACK_8_F(c, result, src, i);
}
- struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
- *dest = result;
+ ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
}
/** Handles sign-extended bitfield extracts for 16 bits. */
struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
- if (nir_op_infos[compare_instr->op].input_types[0] == nir_type_float)
+ unsigned unsized_type =
+ nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
+ if (unsized_type == nir_type_float)
qir_SF(c, qir_FSUB(c, src0, src1));
else
qir_SF(c, qir_SUB(c, src0, src1));
break;
}
+	/* Make the temporary for ntq_store_dest(). */
+ *dest = qir_MOV(c, *dest);
+
return true;
}
{
if (!instr->src[0].src.is_ssa)
goto out;
+ if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
+ goto out;
nir_alu_instr *compare =
nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
if (!compare)
out:
qir_SF(c, src[0]);
- return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
+ return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
+}
+
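+/* Computes dFdx using MUL-pipe rotation: rotating the SIMD vector by 1 (or
+ * 15) lanes pulls in the value from the other column of the 2x2 quad.
+ */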
+static struct qreg
+ntq_fddx(struct vc4_compile *c, struct qreg src)
+{
+ /* Make sure that we have a bare temp to use for MUL rotation, so it
+ * can be allocated to an accumulator.
+ */
+ if (src.pack || src.file != QFILE_TEMP)
+ src = qir_MOV(c, src);
+
+ struct qreg from_left = qir_ROT_MUL(c, src, 1);
+ struct qreg from_right = qir_ROT_MUL(c, src, 15);
+
+ /* Distinguish left/right pixels of the quad. */
+ qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
+ qir_uniform_ui(c, 1)));
+
+ return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
+ qir_FSUB(c, from_right, src),
+ qir_FSUB(c, src, from_left)));
+}
+
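+/* Computes dFdy by rotating by 2 (or 14) lanes, crossing the quad's rows. */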
+static struct qreg
+ntq_fddy(struct vc4_compile *c, struct qreg src)
+{
+ if (src.pack || src.file != QFILE_TEMP)
+ src = qir_MOV(c, src);
+
+ struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
+ struct qreg from_top = qir_ROT_MUL(c, src, 14);
+
+ /* Distinguish top/bottom pixels of the quad. */
+ qir_SF(c, qir_AND(c,
+ qir_reg(QFILE_QPU_ELEMENT, 0),
+ qir_uniform_ui(c, 2)));
+
+ return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
+ qir_FSUB(c, from_top, src),
+ qir_FSUB(c, src, from_bottom)));
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
+ /* This should always be lowered to ALU operations for VC4. */
+ assert(!instr->dest.saturate);
+
/* Vectors are special in that they have non-scalarized writemasks,
* and just take the first swizzle channel for each argument in order
* into each writemask channel.
for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
srcs[i] = ntq_get_src(c, instr->src[i].src,
instr->src[i].swizzle[0]);
- struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
- dest[i] = srcs[i];
+ ntq_store_dest(c, &instr->dest.dest, i,
+ qir_MOV(c, srcs[i]));
return;
}
if (instr->op == nir_op_unpack_unorm_4x8) {
struct qreg src = ntq_get_src(c, instr->src[0].src,
instr->src[0].swizzle[0]);
- struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
for (int i = 0; i < 4; i++) {
if (instr->dest.write_mask & (1 << i))
- dest[i] = qir_UNPACK_8_F(c, src, i);
+ ntq_store_dest(c, &instr->dest.dest, i,
+ qir_UNPACK_8_F(c, src, i));
}
return;
}
src[i] = ntq_get_alu_src(c, instr, i);
}
- /* Pick the channel to store the output in. */
- assert(!instr->dest.saturate);
- struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
- assert(util_is_power_of_two(instr->dest.write_mask));
- dest += ffs(instr->dest.write_mask) - 1;
+ struct qreg result;
switch (instr->op) {
case nir_op_fmov:
case nir_op_imov:
- *dest = qir_MOV(c, src[0]);
+ result = qir_MOV(c, src[0]);
break;
case nir_op_fmul:
- *dest = qir_FMUL(c, src[0], src[1]);
+ result = qir_FMUL(c, src[0], src[1]);
break;
case nir_op_fadd:
- *dest = qir_FADD(c, src[0], src[1]);
+ result = qir_FADD(c, src[0], src[1]);
break;
case nir_op_fsub:
- *dest = qir_FSUB(c, src[0], src[1]);
+ result = qir_FSUB(c, src[0], src[1]);
break;
case nir_op_fmin:
- *dest = qir_FMIN(c, src[0], src[1]);
+ result = qir_FMIN(c, src[0], src[1]);
break;
case nir_op_fmax:
- *dest = qir_FMAX(c, src[0], src[1]);
+ result = qir_FMAX(c, src[0], src[1]);
break;
case nir_op_f2i:
case nir_op_f2u:
- *dest = qir_FTOI(c, src[0]);
+ result = qir_FTOI(c, src[0]);
break;
case nir_op_i2f:
case nir_op_u2f:
- *dest = qir_ITOF(c, src[0]);
+ result = qir_ITOF(c, src[0]);
break;
case nir_op_b2f:
- *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
+ result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
break;
case nir_op_b2i:
- *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
+ result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
break;
case nir_op_i2b:
case nir_op_f2b:
qir_SF(c, src[0]);
- *dest = qir_SEL(c, QPU_COND_ZC,
- qir_uniform_ui(c, ~0),
- qir_uniform_ui(c, 0));
+ result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
+ qir_uniform_ui(c, ~0),
+ qir_uniform_ui(c, 0)));
break;
case nir_op_iadd:
- *dest = qir_ADD(c, src[0], src[1]);
+ result = qir_ADD(c, src[0], src[1]);
break;
case nir_op_ushr:
- *dest = qir_SHR(c, src[0], src[1]);
+ result = qir_SHR(c, src[0], src[1]);
break;
case nir_op_isub:
- *dest = qir_SUB(c, src[0], src[1]);
+ result = qir_SUB(c, src[0], src[1]);
break;
case nir_op_ishr:
- *dest = qir_ASR(c, src[0], src[1]);
+ result = qir_ASR(c, src[0], src[1]);
break;
case nir_op_ishl:
- *dest = qir_SHL(c, src[0], src[1]);
+ result = qir_SHL(c, src[0], src[1]);
break;
case nir_op_imin:
- *dest = qir_MIN(c, src[0], src[1]);
+ result = qir_MIN(c, src[0], src[1]);
break;
case nir_op_imax:
- *dest = qir_MAX(c, src[0], src[1]);
+ result = qir_MAX(c, src[0], src[1]);
break;
case nir_op_iand:
- *dest = qir_AND(c, src[0], src[1]);
+ result = qir_AND(c, src[0], src[1]);
break;
case nir_op_ior:
- *dest = qir_OR(c, src[0], src[1]);
+ result = qir_OR(c, src[0], src[1]);
break;
case nir_op_ixor:
- *dest = qir_XOR(c, src[0], src[1]);
+ result = qir_XOR(c, src[0], src[1]);
break;
case nir_op_inot:
- *dest = qir_NOT(c, src[0]);
+ result = qir_NOT(c, src[0]);
break;
case nir_op_imul:
- *dest = ntq_umul(c, src[0], src[1]);
+ result = ntq_umul(c, src[0], src[1]);
break;
case nir_op_seq:
case nir_op_ige:
case nir_op_uge:
case nir_op_ilt:
- if (!ntq_emit_comparison(c, dest, instr, instr)) {
+ if (!ntq_emit_comparison(c, &result, instr, instr)) {
fprintf(stderr, "Bad comparison instruction\n");
}
break;
case nir_op_bcsel:
- *dest = ntq_emit_bcsel(c, instr, src);
+ result = ntq_emit_bcsel(c, instr, src);
break;
case nir_op_fcsel:
qir_SF(c, src[0]);
- *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
+ result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
break;
case nir_op_frcp:
- *dest = ntq_rcp(c, src[0]);
+ result = ntq_rcp(c, src[0]);
break;
case nir_op_frsq:
- *dest = ntq_rsq(c, src[0]);
+ result = ntq_rsq(c, src[0]);
break;
case nir_op_fexp2:
- *dest = qir_EXP2(c, src[0]);
+ result = qir_EXP2(c, src[0]);
break;
case nir_op_flog2:
- *dest = qir_LOG2(c, src[0]);
+ result = qir_LOG2(c, src[0]);
break;
case nir_op_ftrunc:
- *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
+ result = qir_ITOF(c, qir_FTOI(c, src[0]));
break;
case nir_op_fceil:
- *dest = ntq_fceil(c, src[0]);
+ result = ntq_fceil(c, src[0]);
break;
case nir_op_ffract:
- *dest = ntq_ffract(c, src[0]);
+ result = ntq_ffract(c, src[0]);
break;
case nir_op_ffloor:
- *dest = ntq_ffloor(c, src[0]);
+ result = ntq_ffloor(c, src[0]);
break;
case nir_op_fsin:
- *dest = ntq_fsin(c, src[0]);
+ result = ntq_fsin(c, src[0]);
break;
case nir_op_fcos:
- *dest = ntq_fcos(c, src[0]);
+ result = ntq_fcos(c, src[0]);
break;
case nir_op_fsign:
- *dest = ntq_fsign(c, src[0]);
+ result = ntq_fsign(c, src[0]);
break;
case nir_op_fabs:
- *dest = qir_FMAXABS(c, src[0], src[0]);
+ result = qir_FMAXABS(c, src[0], src[0]);
break;
case nir_op_iabs:
- *dest = qir_MAX(c, src[0],
+ result = qir_MAX(c, src[0],
qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
break;
case nir_op_ibitfield_extract:
- *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
+ result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
break;
case nir_op_ubitfield_extract:
- *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
+ result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
break;
case nir_op_usadd_4x8:
- *dest = qir_V8ADDS(c, src[0], src[1]);
+ result = qir_V8ADDS(c, src[0], src[1]);
break;
case nir_op_ussub_4x8:
- *dest = qir_V8SUBS(c, src[0], src[1]);
+ result = qir_V8SUBS(c, src[0], src[1]);
break;
case nir_op_umin_4x8:
- *dest = qir_V8MIN(c, src[0], src[1]);
+ result = qir_V8MIN(c, src[0], src[1]);
break;
case nir_op_umax_4x8:
- *dest = qir_V8MAX(c, src[0], src[1]);
+ result = qir_V8MAX(c, src[0], src[1]);
break;
case nir_op_umul_unorm_4x8:
- *dest = qir_V8MULD(c, src[0], src[1]);
+ result = qir_V8MULD(c, src[0], src[1]);
+ break;
+
+ case nir_op_fddx:
+ case nir_op_fddx_coarse:
+ case nir_op_fddx_fine:
+ result = ntq_fddx(c, src[0]);
+ break;
+
+ case nir_op_fddy:
+ case nir_op_fddy_coarse:
+ case nir_op_fddy_fine:
+ result = ntq_fddy(c, src[0]);
break;
default:
fprintf(stderr, "\n");
abort();
}
+
+ /* We have a scalar result, so the instruction should only have a
+ * single channel written to.
+ */
+ assert(util_is_power_of_two(instr->dest.write_mask));
+ ntq_store_dest(c, &instr->dest.dest,
+ ffs(instr->dest.write_mask) - 1, result);
}
static void
}
uint32_t discard_cond = QPU_COND_ALWAYS;
- if (c->discard.file != QFILE_NULL) {
+ if (c->s->info->fs.uses_discard) {
qir_SF(c, c->discard);
discard_cond = QPU_COND_ZS;
}
if (c->fs_key->stencil_enabled) {
- qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
+ qir_uniform(c, QUNIFORM_STENCIL, 0));
if (c->fs_key->stencil_twoside) {
- qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
+ qir_uniform(c, QUNIFORM_STENCIL, 1));
}
if (c->fs_key->stencil_full_writemasks) {
- qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
+ qir_uniform(c, QUNIFORM_STENCIL, 2));
}
}
}
if (c->fs_key->depth_enabled) {
- struct qreg z;
if (c->output_position_index != -1) {
- z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
- qir_uniform_f(c, 0xffffff)));
+ qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
+ qir_FMUL(c,
+ c->outputs[c->output_position_index],
+ qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
} else {
- z = qir_FRAG_Z(c);
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
+ qir_FRAG_Z(c))->cond = discard_cond;
}
- struct qinst *inst = qir_TLB_Z_WRITE(c, z);
- inst->cond = discard_cond;
}
if (!c->msaa_per_sample_output) {
- struct qinst *inst = qir_TLB_COLOR_WRITE(c, color);
- inst->cond = discard_cond;
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
+ color)->cond = discard_cond;
} else {
for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
- struct qinst *inst = qir_TLB_COLOR_WRITE_MS(c, c->sample_colors[i]);
- inst->cond = discard_cond;
+ qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
+ c->sample_colors[i])->cond = discard_cond;
}
}
}
return;
c->vattr_sizes[0] = 4;
- struct qreg vpm = { QFILE_VPM, 0 };
- (void)qir_MOV(c, vpm);
+ (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
c->num_inputs++;
}
struct vc4_varying_slot *fs_inputs,
uint32_t num_fs_inputs)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+ struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
do {
progress = false;
- nir_lower_vars_to_ssa(s);
- nir_lower_alu_to_scalar(s);
-
- progress = nir_copy_prop(s) || progress;
- progress = nir_opt_dce(s) || progress;
- progress = nir_opt_cse(s) || progress;
- progress = nir_opt_peephole_select(s) || progress;
- progress = nir_opt_algebraic(s) || progress;
- progress = nir_opt_constant_folding(s) || progress;
- progress = nir_opt_undef(s) || progress;
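+		/* NIR_PASS ORs each pass's progress into "progress" and can
+		 * validate the shader between passes in debug builds.
+		 */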
+ NIR_PASS_V(s, nir_lower_vars_to_ssa);
+ NIR_PASS(progress, s, nir_lower_alu_to_scalar);
+ NIR_PASS(progress, s, nir_lower_phis_to_scalar);
+ NIR_PASS(progress, s, nir_copy_prop);
+ NIR_PASS(progress, s, nir_opt_remove_phis);
+ NIR_PASS(progress, s, nir_opt_dce);
+ NIR_PASS(progress, s, nir_opt_dead_cf);
+ NIR_PASS(progress, s, nir_opt_cse);
+ NIR_PASS(progress, s, nir_opt_peephole_select, 8);
+ NIR_PASS(progress, s, nir_opt_algebraic);
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ NIR_PASS(progress, s, nir_opt_undef);
} while (progress);
}
if (c->stage == QSTAGE_FRAG) {
if (var->data.location == VARYING_SLOT_POS) {
emit_fragcoord_input(c, loc);
- } else if (var->data.location == VARYING_SLOT_FACE) {
- c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
- } else if (var->data.location >= VARYING_SLOT_VAR0 &&
- (c->fs_key->point_sprite_mask &
- (1 << (var->data.location -
- VARYING_SLOT_VAR0)))) {
+ } else if (var->data.location == VARYING_SLOT_PNTC ||
+ (var->data.location >= VARYING_SLOT_VAR0 &&
+ (c->fs_key->point_sprite_mask &
+ (1 << (var->data.location -
+ VARYING_SLOT_VAR0))))) {
c->inputs[loc * 4 + 0] = c->point_x;
c->inputs[loc * 4 + 1] = c->point_y;
} else {
ntq_setup_uniforms(struct vc4_compile *c)
{
nir_foreach_variable(var, &c->s->uniforms) {
- unsigned array_len = MAX2(glsl_get_length(var->type), 1);
- unsigned array_elem_size = 4 * sizeof(float);
+ uint32_t vec4_count = st_glsl_type_size(var->type);
+ unsigned vec4_size = 4 * sizeof(float);
- declare_uniform_range(c, var->data.driver_location * array_elem_size,
- array_len * array_elem_size);
+ declare_uniform_range(c, var->data.driver_location * vec4_size,
+ vec4_count * vec4_size);
}
}
_mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
for (int i = 0; i < array_len * nir_reg->num_components; i++)
- qregs[i] = qir_uniform_ui(c, 0);
+ qregs[i] = qir_get_temp(c);
}
}
{
struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
for (int i = 0; i < instr->def.num_components; i++)
- qregs[i] = qir_uniform_ui(c, instr->value.u[i]);
+ qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
_mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
- const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
nir_const_value *const_offset;
unsigned offset;
- struct qreg *dest = NULL;
-
- if (info->has_dest) {
- dest = ntq_get_dest(c, &instr->dest);
- }
switch (instr->intrinsic) {
case nir_intrinsic_load_uniform:
assert(instr->num_components == 1);
const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
- offset = instr->const_index[0] + const_offset->u[0];
+ offset = nir_intrinsic_base(instr) + const_offset->u32[0];
assert(offset % 4 == 0);
/* We need dwords */
offset = offset / 4;
- if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
- *dest = qir_uniform(c, QUNIFORM_UNIFORM,
- offset);
- } else {
- *dest = qir_uniform(c, offset -
- VC4_NIR_STATE_UNIFORM_OFFSET,
- 0);
- }
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_UNIFORM,
+ offset));
} else {
- *dest = indirect_uniform_load(c, instr);
+ ntq_store_dest(c, &instr->dest, 0,
+ indirect_uniform_load(c, instr));
}
break;
case nir_intrinsic_load_user_clip_plane:
- *dest = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
- instr->const_index[0]);
+ for (int i = 0; i < instr->num_components; i++) {
+ ntq_store_dest(c, &instr->dest, i,
+ qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
+ nir_intrinsic_ucp_id(instr) *
+ 4 + i));
+ }
+ break;
+
+ case nir_intrinsic_load_blend_const_color_r_float:
+ case nir_intrinsic_load_blend_const_color_g_float:
+ case nir_intrinsic_load_blend_const_color_b_float:
+ case nir_intrinsic_load_blend_const_color_a_float:
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
+ (instr->intrinsic -
+ nir_intrinsic_load_blend_const_color_r_float),
+ 0));
+ break;
+
+ case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
+ 0));
+ break;
+
+ case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
+ 0));
+ break;
+
+ case nir_intrinsic_load_alpha_ref_float:
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
break;
case nir_intrinsic_load_sample_mask_in:
- *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
+ break;
+
+ case nir_intrinsic_load_front_face:
+ /* The register contains 0 (front) or 1 (back), and we need to
+ * turn it into a NIR bool where true means front.
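+	 * Adding -1 maps 0 to ~0 (true) and 1 to 0 (false).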
+ */
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_ADD(c,
+ qir_uniform_ui(c, -1),
+ qir_reg(QFILE_FRAG_REV_FLAG, 0)));
break;
case nir_intrinsic_load_input:
assert(instr->num_components == 1);
const_offset = nir_src_as_const_value(instr->src[0]);
assert(const_offset && "vc4 doesn't support indirect inputs");
- if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
- assert(const_offset->u[0] == 0);
+ if (c->stage == QSTAGE_FRAG &&
+ nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
+ assert(const_offset->u32[0] == 0);
/* Reads of the per-sample color need to be done in
* order.
*/
- int sample_index = (instr->const_index[0] -
+ int sample_index = (nir_intrinsic_base(instr) -
VC4_NIR_TLB_COLOR_READ_INPUT);
for (int i = 0; i <= sample_index; i++) {
if (c->color_reads[i].file == QFILE_NULL) {
qir_TLB_COLOR_READ(c);
}
}
- *dest = c->color_reads[sample_index];
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->color_reads[sample_index]));
} else {
- offset = instr->const_index[0] + const_offset->u[0];
- *dest = c->inputs[offset];
+ offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ int comp = nir_intrinsic_component(instr);
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->inputs[offset * 4 + comp]));
}
break;
case nir_intrinsic_store_output:
const_offset = nir_src_as_const_value(instr->src[1]);
assert(const_offset && "vc4 doesn't support indirect outputs");
- offset = instr->const_index[0] + const_offset->u[0];
+ offset = nir_intrinsic_base(instr) + const_offset->u32[0];
/* MSAA color outputs are the only case where we have an
* output that's not lowered to being a store of a single 32
i));
}
} else {
+ offset = offset * 4 + nir_intrinsic_component(instr);
assert(instr->num_components == 1);
c->outputs[offset] =
qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
break;
case nir_intrinsic_discard:
- c->discard = qir_uniform_ui(c, ~0);
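+		/* Under nonuniform control flow, only update c->discard for
+		 * the channels that are currently executing (execute == 0).
+		 */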
+ if (c->execute.file != QFILE_NULL) {
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->discard,
+ qir_uniform_ui(c, ~0));
+ } else {
+ qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
+ }
break;
- case nir_intrinsic_discard_if:
- if (c->discard.file == QFILE_NULL)
- c->discard = qir_uniform_ui(c, 0);
- c->discard = qir_OR(c, c->discard,
+ case nir_intrinsic_discard_if: {
+ /* true (~0) if we're discarding */
+ struct qreg cond = ntq_get_src(c, instr->src[0], 0);
+
+ if (c->execute.file != QFILE_NULL) {
+ /* execute == 0 means the channel is active. Invert
+ * the condition so that we can use zero as "executing
+ * and discarding."
+ */
+ qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
+ qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
+ } else {
+ qir_OR_dest(c, c->discard, c->discard,
ntq_get_src(c, instr->src[0], 0));
+ }
+
break;
+ }
default:
fprintf(stderr, "Unknown intrinsic: ");
}
}
+/* Clears (activates) the execute flags for any channels whose jump target
+ * matches this block.
+ */
+static void
+ntq_activate_execute_for_block(struct vc4_compile *c)
+{
+ qir_SF(c, qir_SUB(c,
+ c->execute,
+ qir_uniform_ui(c, c->cur_block->index)));
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
+}
+
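+/* Emits a nonuniform IF.  c->execute holds, per channel, the index of the
+ * block that channel is waiting to run (0 means active); writes by inactive
+ * channels are squashed by making instructions conditional on execute == 0.
+ */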
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
- fprintf(stderr, "general IF statements not handled.\n");
+ if (!c->vc4->screen->has_control_flow) {
+ fprintf(stderr,
+ "IF statement support requires updated kernel.\n");
+ return;
+ }
+
+ nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
+ bool empty_else_block =
+ (nir_else_block == nir_if_last_else_block(if_stmt) &&
+ exec_list_is_empty(&nir_else_block->instr_list));
+
+ struct qblock *then_block = qir_new_block(c);
+ struct qblock *after_block = qir_new_block(c);
+ struct qblock *else_block;
+ if (empty_else_block)
+ else_block = after_block;
+ else
+ else_block = qir_new_block(c);
+
+ bool was_top_level = false;
+ if (c->execute.file == QFILE_NULL) {
+ c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
+ was_top_level = true;
+ }
+
+ /* Set ZS for executing (execute == 0) and jumping (if->condition ==
+ * 0) channels, and then update execute flags for those to point to
+ * the ELSE block.
+ */
+ qir_SF(c, qir_OR(c,
+ c->execute,
+ ntq_get_src(c, if_stmt->condition, 0)));
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, else_block->index));
+
+ /* Jump to ELSE if nothing is active for THEN, otherwise fall
+ * through.
+ */
+ qir_SF(c, c->execute);
+ qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
+ qir_link_blocks(c->cur_block, else_block);
+ qir_link_blocks(c->cur_block, then_block);
+
+ /* Process the THEN block. */
+ qir_set_emit_block(c, then_block);
+ ntq_emit_cf_list(c, &if_stmt->then_list);
+
+ if (!empty_else_block) {
+ /* Handle the end of the THEN block. First, all currently
+ * active channels update their execute flags to point to
+		 * ENDIF.
+ */
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, after_block->index));
+
+ /* If everything points at ENDIF, then jump there immediately. */
+ qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
+ qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
+ qir_link_blocks(c->cur_block, after_block);
+ qir_link_blocks(c->cur_block, else_block);
+
+ qir_set_emit_block(c, else_block);
+ ntq_activate_execute_for_block(c);
+ ntq_emit_cf_list(c, &if_stmt->else_list);
+ }
+
+ qir_link_blocks(c->cur_block, after_block);
+
+ qir_set_emit_block(c, after_block);
+ if (was_top_level)
+ c->execute = c->undef;
+ else
+ ntq_activate_execute_for_block(c);
+}
+
+static void
+ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
+{
+ switch (jump->type) {
+ case nir_jump_break:
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, c->loop_break_block->index));
+ break;
+
+ case nir_jump_continue:
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, c->loop_cont_block->index));
+ break;
+
+ case nir_jump_return:
+		unreachable("All returns should be lowered\n");
+ }
}
static void
ntq_emit_tex(c, nir_instr_as_tex(instr));
break;
+ case nir_instr_type_jump:
+ ntq_emit_jump(c, nir_instr_as_jump(instr));
+ break;
+
default:
fprintf(stderr, "Unknown NIR instr type: ");
nir_print_instr(instr, stderr);
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
ntq_emit_instr(c, instr);
}
}
+
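+/* Emits a nonuniform loop.  break/continue point a channel's execute value
+ * at the break or continue block; at the end of the body we branch back if
+ * any channel continued or fell through to the bottom.
+ */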
+static void
+ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
+{
+ if (!c->vc4->screen->has_control_flow) {
+ fprintf(stderr,
+			"Loop support requires updated kernel.\n");
+ ntq_emit_cf_list(c, &loop->body);
+ return;
+ }
+
+ bool was_top_level = false;
+ if (c->execute.file == QFILE_NULL) {
+ c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
+ was_top_level = true;
+ }
+
+ struct qblock *save_loop_cont_block = c->loop_cont_block;
+ struct qblock *save_loop_break_block = c->loop_break_block;
+
+ c->loop_cont_block = qir_new_block(c);
+ c->loop_break_block = qir_new_block(c);
+
+ qir_link_blocks(c->cur_block, c->loop_cont_block);
+ qir_set_emit_block(c, c->loop_cont_block);
+ ntq_activate_execute_for_block(c);
+
+ ntq_emit_cf_list(c, &loop->body);
+
+ /* If anything had explicitly continued, or is here at the end of the
+ * loop, then we need to loop again. SF updates are masked by the
+ * instruction's condition, so we can do the OR of the two conditions
+ * within SF.
+ */
+ qir_SF(c, c->execute);
+ struct qinst *cont_check =
+ qir_SUB_dest(c,
+ c->undef,
+ c->execute,
+ qir_uniform_ui(c, c->loop_cont_block->index));
+ cont_check->cond = QPU_COND_ZC;
+ cont_check->sf = true;
+
+ qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
+ qir_link_blocks(c->cur_block, c->loop_cont_block);
+ qir_link_blocks(c->cur_block, c->loop_break_block);
+
+ qir_set_emit_block(c, c->loop_break_block);
+ if (was_top_level)
+ c->execute = c->undef;
+ else
+ ntq_activate_execute_for_block(c);
+
+ c->loop_break_block = save_loop_break_block;
+ c->loop_cont_block = save_loop_cont_block;
+}
+
+static void
+ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
+{
+ fprintf(stderr, "FUNCTIONS not handled.\n");
+ abort();
+}
+
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
foreach_list_typed(nir_cf_node, node, node, list) {
switch (node->type) {
- /* case nir_cf_node_loop: */
case nir_cf_node_block:
ntq_emit_block(c, nir_cf_node_as_block(node));
break;
ntq_emit_if(c, nir_cf_node_as_if(node));
break;
+ case nir_cf_node_loop:
+ ntq_emit_loop(c, nir_cf_node_as_loop(node));
+ break;
+
+ case nir_cf_node_function:
+ ntq_emit_function(c, nir_cf_node_as_function(node));
+ break;
+
default:
- assert(0);
+ fprintf(stderr, "Unknown NIR node type\n");
+ abort();
}
}
}
static void
nir_to_qir(struct vc4_compile *c)
{
+ if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
+ c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
+
ntq_setup_inputs(c);
ntq_setup_outputs(c);
ntq_setup_uniforms(c);
ntq_setup_registers(c, &c->s->registers);
/* Find the main function and emit the body. */
- nir_foreach_function(c->s, function) {
+ nir_foreach_function(function, c->s) {
assert(strcmp(function->name, "main") == 0);
assert(function->impl);
ntq_emit_impl(c, function->impl);
.lower_extract_byte = true,
.lower_extract_word = true,
.lower_ffma = true,
- .lower_flrp = true,
+ .lower_flrp32 = true,
.lower_fpow = true,
.lower_fsat = true,
.lower_fsqrt = true,
.lower_negate = true,
+ .native_integers = true,
};
-static bool
-count_nir_instrs_in_block(nir_block *block, void *state)
+const void *
+vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
+ enum pipe_shader_ir ir, unsigned shader)
{
- int *count = (int *) state;
- nir_foreach_instr(block, instr) {
- *count = *count + 1;
- }
- return true;
+ return &nir_options;
}
static int
count_nir_instrs(nir_shader *nir)
{
int count = 0;
- nir_foreach_function(nir, function) {
+ nir_foreach_function(function, nir) {
if (!function->impl)
continue;
- nir_foreach_block(function->impl, count_nir_instrs_in_block, &count);
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr(instr, block)
+ count++;
+ }
}
return count;
}
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
- struct vc4_key *key)
+ struct vc4_key *key, bool fs_threaded)
{
struct vc4_compile *c = qir_compile_init();
+ c->vc4 = vc4;
c->stage = stage;
c->shader_state = &key->shader_state->base;
c->program_id = key->shader_state->program_id;
- c->variant_id = key->shader_state->compiled_variant_count++;
+ c->variant_id =
+ p_atomic_inc_return(&key->shader_state->compiled_variant_count);
+ c->fs_threaded = fs_threaded;
c->key = key;
switch (stage) {
break;
}
- const struct tgsi_token *tokens = key->shader_state->base.tokens;
-
- if (vc4_debug & VC4_DEBUG_TGSI) {
- fprintf(stderr, "%s prog %d/%d TGSI:\n",
- qir_get_stage_name(c->stage),
- c->program_id, c->variant_id);
- tgsi_dump(tokens, 0);
- }
-
- c->s = tgsi_to_nir(tokens, &nir_options);
- nir_opt_global_to_local(c->s);
- nir_convert_to_ssa(c->s);
+ c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
if (stage == QSTAGE_FRAG)
- vc4_nir_lower_blend(c);
+ NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
struct nir_lower_tex_options tex_options = {
/* We would need to implement txs, but we don't want the
*/
.lower_rect = false,
- /* We want to use this, but we don't want to newton-raphson
- * its rcp.
- */
- .lower_txp = false,
+ .lower_txp = ~0,
/* Apply swizzles to all samplers. */
.swizzle_result = ~0,
} else {
tex_options.swizzles[i][j] = arb_swiz;
}
-
- /* If ARB_texture_swizzle is reading from the R, G, or
- * B channels of an sRGB texture, then we need to
- * apply sRGB decode to this channel at sample time.
- */
- if (arb_swiz < 3 && util_format_is_srgb(format)) {
- c->tex_srgb_decode[i] |= (1 << j);
- }
-
}
+
+ if (util_format_is_srgb(format))
+ tex_options.lower_srgb |= (1 << i);
}
- nir_lower_tex(c->s, &tex_options);
+ NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
if (c->fs_key && c->fs_key->light_twoside)
- nir_lower_two_sided_color(c->s);
+ NIR_PASS_V(c->s, nir_lower_two_sided_color);
- if (stage == QSTAGE_FRAG)
- nir_lower_clip_fs(c->s, c->key->ucp_enables);
+ if (c->vs_key && c->vs_key->clamp_color)
+ NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
+
+ if (c->key->ucp_enables) {
+ if (stage == QSTAGE_FRAG) {
+ NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+ } else {
+ NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_io_to_scalar,
+ nir_var_shader_out);
+ }
+ }
+
+ /* FS input scalarizing must happen after nir_lower_two_sided_color,
+ * which only handles a vec4 at a time. Similarly, VS output
+ * scalarizing must happen after nir_lower_clip_vs.
+ */
+ if (c->stage == QSTAGE_FRAG)
+ NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
else
- nir_lower_clip_vs(c->s, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
- vc4_nir_lower_io(c);
- vc4_nir_lower_txf_ms(c);
- nir_lower_idiv(c->s);
- nir_lower_load_const_to_scalar(c->s);
+ NIR_PASS_V(c->s, vc4_nir_lower_io, c);
+ NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
+ NIR_PASS_V(c->s, nir_lower_idiv);
vc4_optimize_nir(c->s);
- nir_remove_dead_variables(c->s);
-
- nir_convert_from_ssa(c->s, true);
+ NIR_PASS_V(c->s, nir_convert_from_ssa, true);
if (vc4_debug & VC4_DEBUG_SHADERDB) {
fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
switch (stage) {
case QSTAGE_FRAG:
+ /* FS threading requires that the thread execute
+ * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
+ * (with no other THRSW afterwards, obviously). If we didn't
+ * fetch a texture at a top level block, this wouldn't be
+ * true.
+ */
+ if (c->fs_threaded && !c->last_thrsw_at_top_level) {
+ c->failed = true;
+ return c;
+ }
+
emit_frag_end(c);
break;
case QSTAGE_VERT:
emit_vert_end(c,
- vc4->prog.fs->input_slots,
- vc4->prog.fs->num_inputs);
+ c->vs_key->fs_inputs->input_slots,
+ c->vs_key->fs_inputs->num_inputs);
break;
case QSTAGE_COORD:
emit_coord_end(c);
qir_get_stage_name(c->stage),
c->program_id, c->variant_id);
qir_dump(c);
+ fprintf(stderr, "\n");
}
qir_optimize(c);
qir_lower_uniforms(c);
qir_schedule_instructions(c);
+ qir_emit_uniform_stream_resets(c);
if (vc4_debug & VC4_DEBUG_QIR) {
fprintf(stderr, "%s prog %d/%d QIR:\n",
qir_get_stage_name(c->stage),
c->program_id, c->variant_id);
qir_dump(c);
+ fprintf(stderr, "\n");
}
qir_reorder_uniforms(c);
if (!so)
return NULL;
- so->base.tokens = tgsi_dup_tokens(cso->tokens);
so->program_id = vc4->next_uncompiled_program_id++;
+ nir_shader *s;
+
+ if (cso->type == PIPE_SHADER_IR_NIR) {
+ /* The backend takes ownership of the NIR shader on state
+ * creation.
+ */
+ s = cso->ir.nir;
+ } else {
+ assert(cso->type == PIPE_SHADER_IR_TGSI);
+
+ if (vc4_debug & VC4_DEBUG_TGSI) {
+ fprintf(stderr, "prog %d TGSI:\n",
+ so->program_id);
+ tgsi_dump(cso->tokens, 0);
+ fprintf(stderr, "\n");
+ }
+ s = tgsi_to_nir(cso->tokens, &nir_options);
+ }
+
+ NIR_PASS_V(s, nir_opt_global_to_local);
+ NIR_PASS_V(s, nir_convert_to_ssa);
+ NIR_PASS_V(s, nir_normalize_cubemap_coords);
+
+ NIR_PASS_V(s, nir_lower_load_const_to_scalar);
+
+ vc4_optimize_nir(s);
+
+ NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
+
+ /* Garbage collect dead instructions */
+ nir_sweep(s);
+
+ so->base.type = PIPE_SHADER_IR_NIR;
+ so->base.ir.nir = s;
+
+ if (vc4_debug & VC4_DEBUG_NIR) {
+ fprintf(stderr, "%s prog %d NIR:\n",
+ gl_shader_stage_name(s->stage),
+ so->program_id);
+ nir_print_shader(s, stderr);
+ fprintf(stderr, "\n");
+ }
+
return so;
}
vc4_set_shader_uniform_dirty_flags(shader);
}
+static void
+vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
+ struct vc4_compiled_shader *shader)
+{
+ struct vc4_fs_inputs inputs;
+
+ memset(&inputs, 0, sizeof(inputs));
+ inputs.input_slots = ralloc_array(shader,
+ struct vc4_varying_slot,
+ c->num_input_slots);
+
+ bool input_live[c->num_input_slots];
+
+ memset(input_live, 0, sizeof(input_live));
+ qir_for_each_inst_inorder(inst, c) {
+ for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
+ if (inst->src[i].file == QFILE_VARY)
+ input_live[inst->src[i].index] = true;
+ }
+ }
+
+ for (int i = 0; i < c->num_input_slots; i++) {
+ struct vc4_varying_slot *slot = &c->input_slots[i];
+
+ if (!input_live[i])
+ continue;
+
+ /* Skip non-VS-output inputs. */
+ if (slot->slot == (uint8_t)~0)
+ continue;
+
+ if (slot->slot == VARYING_SLOT_COL0 ||
+ slot->slot == VARYING_SLOT_COL1 ||
+ slot->slot == VARYING_SLOT_BFC0 ||
+ slot->slot == VARYING_SLOT_BFC1) {
+ shader->color_inputs |= (1 << inputs.num_inputs);
+ }
+
+ inputs.input_slots[inputs.num_inputs] = *slot;
+ inputs.num_inputs++;
+ }
+ shader->num_inputs = inputs.num_inputs;
+
+ /* Add our set of inputs to the set of all inputs seen. This way, we
+ * can have a single pointer that identifies an FS inputs set,
+ * allowing VS to avoid recompiling when the FS is recompiled (or a
+ * new one is bound using separate shader objects) but the inputs
+ * don't change.
+ */
+ struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
+ if (entry) {
+ shader->fs_inputs = entry->key;
+ ralloc_free(inputs.input_slots);
+ } else {
+ struct vc4_fs_inputs *alloc_inputs;
+
+ alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
+ memcpy(alloc_inputs, &inputs, sizeof(inputs));
+ ralloc_steal(alloc_inputs, inputs.input_slots);
+ _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);
+
+ shader->fs_inputs = alloc_inputs;
+ }
+}
+
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
struct vc4_key *key)
{
struct hash_table *ht;
uint32_t key_size;
+ bool try_threading;
+
if (stage == QSTAGE_FRAG) {
ht = vc4->fs_cache;
key_size = sizeof(struct vc4_fs_key);
+ try_threading = vc4->screen->has_threaded_fs;
} else {
ht = vc4->vs_cache;
key_size = sizeof(struct vc4_vs_key);
+ try_threading = false;
}
struct vc4_compiled_shader *shader;
if (entry)
return entry->data;
- struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
+ struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
+ /* If the FS failed to compile threaded, fall back to single threaded. */
+ if (try_threading && c->failed) {
+ qir_compile_destroy(c);
+ c = vc4_shader_ntq(vc4, stage, key, false);
+ }
+
shader = rzalloc(NULL, struct vc4_compiled_shader);
shader->program_id = vc4->next_compiled_program_id++;
if (stage == QSTAGE_FRAG) {
- bool input_live[c->num_input_slots];
-
- memset(input_live, 0, sizeof(input_live));
- list_for_each_entry(struct qinst, inst, &c->instructions, link) {
- for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
- if (inst->src[i].file == QFILE_VARY)
- input_live[inst->src[i].index] = true;
- }
- }
-
- shader->input_slots = ralloc_array(shader,
- struct vc4_varying_slot,
- c->num_input_slots);
+ vc4_setup_compiled_fs_inputs(vc4, c, shader);
- for (int i = 0; i < c->num_input_slots; i++) {
- struct vc4_varying_slot *slot = &c->input_slots[i];
-
- if (!input_live[i])
- continue;
-
- /* Skip non-VS-output inputs. */
- if (slot->slot == (uint8_t)~0)
- continue;
-
- if (slot->slot == VARYING_SLOT_COL0 ||
- slot->slot == VARYING_SLOT_COL1 ||
- slot->slot == VARYING_SLOT_BFC0 ||
- slot->slot == VARYING_SLOT_BFC1) {
- shader->color_inputs |= (1 << shader->num_inputs);
- }
-
- shader->input_slots[shader->num_inputs] = *slot;
- shader->num_inputs++;
- }
+ /* Note: the temporary clone in c->s has been freed. */
+ nir_shader *orig_shader = key->shader_state->base.ir.nir;
+ if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
+ shader->disable_early_z = true;
} else {
shader->num_inputs = c->num_inputs;
}
}
- copy_uniform_state_to_shader(shader, c);
- shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
- c->qpu_inst_count * sizeof(uint64_t));
+ shader->failed = c->failed;
+	if (!c->failed) {
+ copy_uniform_state_to_shader(shader, c);
+ shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
+ c->qpu_inst_count *
+ sizeof(uint64_t));
+ }
+
+ shader->fs_threaded = c->fs_threaded;
/* Copy the compiler UBO range state to the compiled shader, dropping
* out arrays that were never referenced by an indirect load.
qir_compile_destroy(c);
struct vc4_key *dup_key;
- dup_key = ralloc_size(shader, key_size);
+ dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
memcpy(dup_key, key, key_size);
_mesa_hash_table_insert(ht, dup_key, shader);
{
for (int i = 0; i < texstate->num_textures; i++) {
struct pipe_sampler_view *sampler = texstate->textures[i];
+ struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
struct pipe_sampler_state *sampler_state =
texstate->samplers[i];
key->tex[i].compare_func = sampler_state->compare_func;
key->tex[i].wrap_s = sampler_state->wrap_s;
key->tex[i].wrap_t = sampler_state->wrap_t;
+ key->tex[i].force_first_level =
+ vc4_sampler->force_first_level;
}
}
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
+ struct vc4_job *job = vc4->job;
struct vc4_fs_key local_key;
struct vc4_fs_key *key = &local_key;
VC4_DIRTY_FRAMEBUFFER |
VC4_DIRTY_ZSA |
VC4_DIRTY_RASTERIZER |
+ VC4_DIRTY_SAMPLE_MASK |
VC4_DIRTY_FRAGTEX |
- VC4_DIRTY_TEXSTATE |
VC4_DIRTY_UNCOMPILED_FS))) {
return;
}
} else {
key->logicop_func = PIPE_LOGICOP_COPY;
}
- key->msaa = vc4->rasterizer->base.multisample;
- key->sample_coverage = (vc4->rasterizer->base.multisample &&
- vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
- key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
- key->sample_alpha_to_one = vc4->blend->alpha_to_one;
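+	/* Per-sample state only affects the shader when the job's render
+	 * targets are multisampled.
+	 */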
+ if (job->msaa) {
+ key->msaa = vc4->rasterizer->base.multisample;
+ key->sample_coverage = (vc4->rasterizer->base.multisample &&
+ vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
+ key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
+ key->sample_alpha_to_one = vc4->blend->alpha_to_one;
+ }
+
if (vc4->framebuffer.cbufs[0])
key->color_format = vc4->framebuffer.cbufs[0]->format;
return;
vc4->dirty |= VC4_DIRTY_COMPILED_FS;
+
if (vc4->rasterizer->base.flatshade &&
old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
}
+
+ if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
+ vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
static void
if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
VC4_DIRTY_RASTERIZER |
VC4_DIRTY_VERTTEX |
- VC4_DIRTY_TEXSTATE |
VC4_DIRTY_VTXSTATE |
VC4_DIRTY_UNCOMPILED_VS |
- VC4_DIRTY_COMPILED_FS))) {
+ VC4_DIRTY_FS_INPUTS))) {
return;
}
memset(key, 0, sizeof(*key));
vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
key->base.shader_state = vc4->prog.bind_vs;
- key->compiled_fs_id = vc4->prog.fs->program_id;
+ key->fs_inputs = vc4->prog.fs->fs_inputs;
+ key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;
for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
}
key->is_coord = true;
+ /* Coord shaders don't care what the FS inputs are. */
+ key->fs_inputs = NULL;
struct vc4_compiled_shader *cs =
vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
if (cs != vc4->prog.cs) {
}
}
-void
+bool
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
vc4_update_compiled_fs(vc4, prim_mode);
vc4_update_compiled_vs(vc4, prim_mode);
+
+ return !(vc4->prog.cs->failed ||
+ vc4->prog.vs->failed ||
+ vc4->prog.fs->failed);
}
static uint32_t
return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
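+/* The FS inputs set hashes the slot array contents, so two shaders with
+ * identical input layouts end up sharing one vc4_fs_inputs.
+ */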
+static uint32_t
+fs_inputs_hash(const void *key)
+{
+ const struct vc4_fs_inputs *inputs = key;
+
+ return _mesa_hash_data(inputs->input_slots,
+ sizeof(*inputs->input_slots) *
+ inputs->num_inputs);
+}
+
+static bool
+fs_inputs_compare(const void *key1, const void *key2)
+{
+ const struct vc4_fs_inputs *inputs1 = key1;
+ const struct vc4_fs_inputs *inputs2 = key2;
+
+ return (inputs1->num_inputs == inputs2->num_inputs &&
+ memcmp(inputs1->input_slots,
+ inputs2->input_slots,
+ sizeof(*inputs1->input_slots) *
+ inputs1->num_inputs) == 0);
+}
+
static void
delete_from_cache_if_matches(struct hash_table *ht,
struct hash_entry *entry,
hash_table_foreach(vc4->vs_cache, entry)
delete_from_cache_if_matches(vc4->vs_cache, entry, so);
- free((void *)so->base.tokens);
+ ralloc_free(so->base.ir.nir);
free(so);
}
fs_cache_compare);
vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
vs_cache_compare);
+ vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
+ fs_inputs_compare);
}
void