#include <inttypes.h>
#include "util/u_format.h"
-#include "util/u_hash.h"
+#include "util/crc32.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
+#include "compiler/nir_types.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
#include "mesa/state_tracker/st_glsl_types.h"
-#ifdef USE_VC4_SIMULATOR
-#include "simpenrose/simpenrose.h"
-#endif
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
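+/* Counts the number of vec4 attribute slots a variable occupies; used as the
+ * size callback when lowering shader input/output variables with
+ * nir_lower_io().
+ */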
+static int
+type_size(const struct glsl_type *type)
+{
+ return glsl_count_attribute_slots(type, false);
+}
+
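+/* Size callback for nir_lower_io() of uniforms; uses the state tracker's
+ * storage packing so the offsets line up with the uniform ranges declared in
+ * ntq_setup_uniforms().
+ */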
+static int
+uniforms_type_size(const struct glsl_type *type)
+{
+ return st_glsl_storage_type_size(type, false);
+}
+
static void
resize_qreg_array(struct vc4_compile *c,
struct qreg **regs,
(*regs)[i] = c->undef;
}
+static void
+ntq_emit_thrsw(struct vc4_compile *c)
+{
+ if (!c->fs_threaded)
+ return;
+
+ /* Always thread switch after each texture operation for now.
+ *
+ * We could do better by batching a bunch of texture fetches up and
+ * then doing one thread switch and collecting all their results
+ * afterward.
+ */
+ qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
+ c->undef, c->undef));
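+
+ /* Note whether this switch happened outside of control flow: threaded
+ * FS compilation requires the last THRSW to be at the top level, which
+ * vc4_shader_ntq() checks before accepting the threaded compile.
+ */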
+ c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
+}
+
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
/* Clamp to [0, array size). Note that MIN/MAX are signed. */
indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
- indirect_offset = qir_MIN(c, indirect_offset,
- qir_uniform_ui(c, (range->dst_offset +
- range->size - 4)));
+ indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
+ qir_uniform_ui(c, (range->dst_offset +
+ range->size - 4)));
+
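+ /* Direct-addressed TMU lookup: write the UBO base address plus the
+ * clamped offset to the TMU S register, then collect the 32-bit
+ * result with qir_TEX_RESULT() (after a thread switch, when running
+ * threaded).
+ */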
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ indirect_offset,
+ qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
+
+ c->num_texture_samples++;
+
+ ntq_emit_thrsw(c);
+
+ return qir_TEX_RESULT(c);
+}
+
+static struct qreg
+vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
+{
+ nir_const_value *buffer_index =
+ nir_src_as_const_value(intr->src[0]);
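+
+ /* Only UBO binding 1 is expected here: buffer index 0 is the gallium
+ * constant buffer, which is handled through the ordinary uniform
+ * paths instead.
+ */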
+ assert(buffer_index->u32[0] == 1);
+ assert(c->stage == QSTAGE_FRAG);
+
+ struct qreg offset = ntq_get_src(c, intr->src[1], 0);
+
+ /* Clamp to [0, array size). Note that MIN/MAX are signed. */
+ offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
+ offset = qir_MIN_NOIMM(c, offset,
+ qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
+
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ offset,
+ qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0]));
- qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
c->num_texture_samples++;
+
+ ntq_emit_thrsw(c);
+
return qir_TEX_RESULT(c);
}
return qregs;
}
+/**
+ * This function is responsible for getting QIR results into the associated
+ * storage for a NIR instruction.
+ *
+ * If it's a NIR SSA def, then we just set the associated hash table entry to
+ * the new result.
+ *
+ * If it's a NIR reg, then we need to update the existing qreg assigned to the
+ * NIR destination with the incoming value. To do that without introducing
+ * new MOVs, we require that the incoming qreg either be a uniform, or be
+ * SSA-defined by the previous QIR instruction in the block and rewritable by
+ * this function. That lets us sneak ahead and insert the SF flag beforehand
+ * (knowing that the previous instruction doesn't depend on flags) and rewrite
+ * its destination to be the NIR reg's destination.
+ */
static void
ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
struct qreg result)
{
+ struct qinst *last_inst = NULL;
+ if (!list_empty(&c->cur_block->instructions))
+ last_inst = (struct qinst *)c->cur_block->instructions.prev;
+
+ assert(result.file == QFILE_UNIF ||
+ (result.file == QFILE_TEMP &&
+ last_inst && last_inst == c->defs[result.index]));
+
if (dest->is_ssa) {
assert(chan < dest->ssa.num_components);
_mesa_hash_table_search(c->def_ht, reg);
struct qreg *qregs = entry->data;
- /* Conditionally move the result to the destination if the
- * channel is active.
+ /* Insert a MOV if the source wasn't an SSA def in the
+ * previous instruction.
+ */
+ if (result.file == QFILE_UNIF) {
+ result = qir_MOV(c, result);
+ last_inst = c->defs[result.index];
+ }
+
+ /* We know they're both temps, so just rewrite index. */
+ c->defs[last_inst->dst.index] = NULL;
+ last_inst->dst.index = qregs[chan].index;
+
+ /* If we're in control flow, then make this update of the reg
+ * conditional on the execution mask.
*/
if (c->execute.file != QFILE_NULL) {
+ last_inst->dst.index = qregs[chan].index;
+
+ /* Set the flags to the current exec mask. To insert
+ * the SF, we temporarily remove our SSA instruction.
+ */
+ list_del(&last_inst->link);
qir_SF(c, c->execute);
- qir_MOV_cond(c, QPU_COND_ZS, qregs[chan], result);
- } else {
- qir_MOV_dest(c, qregs[chan], result);
+ list_addtail(&last_inst->link,
+ &c->cur_block->instructions);
+
+ last_inst->cond = QPU_COND_ZS;
+ last_inst->cond_is_exec_mask = true;
}
}
}
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
unsigned src)
{
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
unsigned chan = ffs(instr->dest.write_mask) - 1;
struct qreg r = ntq_get_src(c, instr->src[src].src,
instr->src[src].swizzle[chan]);
/* Perform the clamping required by kernel validation. */
addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
- addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));
+ addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
- qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
+
+ ntq_emit_thrsw(c);
struct qreg tex = qir_TEX_RESULT(c);
c->num_texture_samples++;
- struct qreg dest[4];
enum pipe_format format = c->key->tex[unit].format;
if (util_format_is_depth_or_stencil(format)) {
struct qreg scaled = ntq_scale_depth_texture(c, tex);
for (int i = 0; i < 4; i++)
- dest[i] = scaled;
+ ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
} else {
for (int i = 0; i < 4; i++)
- dest[i] = qir_UNPACK_8_F(c, tex, i);
+ ntq_store_dest(c, &instr->dest, i,
+ qir_UNPACK_8_F(c, tex, i));
}
-
- for (int i = 0; i < 4; i++)
- ntq_store_dest(c, &instr->dest, i, dest[i]);
}
static void
lod = ntq_get_src(c, instr->src[i].src, 0);
is_txl = true;
break;
- case nir_tex_src_comparitor:
+ case nir_tex_src_comparator:
compare = ntq_get_src(c, instr->src[i].src, 0);
break;
default:
}
}
+ if (c->stage != QSTAGE_FRAG && !is_txl) {
+ /* From the GLSL 1.20 spec:
+ *
+ * "If it is mip-mapped and running on the vertex shader,
+ * then the base texture is used."
+ */
+ is_txl = true;
+ lod = qir_uniform_ui(c, 0);
+ }
+
if (c->key->tex[unit].force_first_level) {
lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
is_txl = true;
unit | (is_txl << 16));
}
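+ /* Each TMU register write below carries its texture parameter uniform
+ * as an extra source (at qir_get_tex_uniform_src()), rather than being
+ * emitted through the old qir_TEX_* helpers.
+ */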
+ struct qinst *tmu;
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
- qir_TEX_R(c, r, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
} else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
- qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
- texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
+ qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
+ unit));
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
}
if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
t = qir_SAT(c, t);
}
- qir_TEX_T(c, t, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
- if (is_txl || is_txb)
- qir_TEX_B(c, lod, texture_u[next_texture_u++]);
+ if (is_txl || is_txb) {
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
+ }
- qir_TEX_S(c, s, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
+ tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
c->num_texture_samples++;
+
+ ntq_emit_thrsw(c);
+
struct qreg tex = qir_TEX_RESULT(c);
enum pipe_format format = c->key->tex[unit].format;
struct qreg u0 = qir_uniform_f(c, 0.0f);
struct qreg u1 = qir_uniform_f(c, 1.0f);
if (c->key->tex[unit].compare_mode) {
+ /* From the GL_ARB_shadow spec:
+ *
+ * "Let Dt (D subscript t) be the depth texture
+ * value, in the range [0, 1]. Let R be the
+ * interpolated texture coordinate clamped to the
+ * range [0, 1]."
+ */
+ compare = qir_SAT(c, compare);
+
switch (c->key->tex[unit].compare_func) {
case PIPE_FUNC_NEVER:
depth_output = qir_uniform_f(c, 0.0f);
struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
struct qreg diff = qir_FSUB(c, src, trunc);
qir_SF(c, diff);
- return qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
+
+ qir_FADD_dest(c, diff,
+ diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
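+ /* diff now has a conditional second write, so it is no longer an SSA
+ * def; make a fresh temporary that ntq_store_dest() can rewrite.
+ */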
+ return qir_MOV(c, diff);
}
/**
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
- struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+ struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
/* This will be < 0 if we truncated and the truncation was of a value
* that was < 0 in the first place.
*/
- qir_SF(c, qir_FSUB(c, src, trunc));
+ qir_SF(c, qir_FSUB(c, src, result));
- return qir_SEL(c, QPU_COND_NS,
- qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
+ struct qinst *sub = qir_FSUB_dest(c, result,
+ result, qir_uniform_f(c, 1.0));
+ sub->cond = QPU_COND_NS;
+
+ return qir_MOV(c, result);
}
/**
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
- struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+ struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
/* This will be < 0 if we truncated and the truncation was of a value
* that was > 0 in the first place.
*/
- qir_SF(c, qir_FSUB(c, trunc, src));
+ qir_SF(c, qir_FSUB(c, result, src));
- return qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
+ qir_FADD_dest(c, result,
+ result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+ return qir_MOV(c, result);
+}
+
+static struct qreg
+ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
+{
+ /* Since we're using a Taylor approximation, we want to have a small
+ * number of coefficients and take advantage of sin/cos repeating
+ * every 2pi. We keep our x as close to 0 as we can, since the series
+ * will be less accurate as |x| increases. (Also, be careful about
+ * shifting the input x value using sin/cos identities, because getting
+ * exact values for x == 0 is very important for SDL rendering.)
+ */
+ struct qreg scaled_x =
+ qir_FMUL(c, x,
+ qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
+ /* Note: FTOI truncates toward 0. */
+ struct qreg x_frac = qir_FSUB(c, scaled_x,
+ qir_ITOF(c, qir_FTOI(c, scaled_x)));
+ /* Map [0.5, 1] to [-0.5, 0] */
+ qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
+ /* Map [-1, -0.5] to [0, 0.5] */
+ qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
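+ /* x_frac is now x/(2*pi) wrapped into [-0.5, 0.5), so the polynomial
+ * approximations below never see more than half a period on either
+ * side of zero.
+ */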
+ return x_frac;
}
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
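+ /* Coefficients of the odd Taylor terms of sin(2*pi*t) about t = 0:
+ * sin(2*pi*t) = sum_k (-1)^k * (2*pi*t)^(2k+1) / (2k+1)!
+ * so coeff[k] = (-1)^k * (2*pi)^(2k+1) / (2k+1)!.
+ */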
float coeff[] = {
- -2.0 * M_PI,
- pow(2.0 * M_PI, 3) / (3 * 2 * 1),
- -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 2.0 * M_PI,
+ -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
+ pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c,
- src,
- qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
-
- struct qreg x = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
+ struct qreg x = ntq_shrink_sincos_input_range(c, src);
struct qreg x2 = qir_FMUL(c, x, x);
struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
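+ /* Coefficients of the even Taylor terms of cos(2*pi*t) about t = 0:
+ * cos(2*pi*t) = sum_k (-1)^k * (2*pi*t)^(2k) / (2k)!
+ * so coeff[k] = (-1)^k * (2*pi)^(2k) / (2k)!.
+ */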
float coeff[] = {
- -1.0f,
- pow(2.0 * M_PI, 2) / (2 * 1),
- -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 1.0f,
+ -pow(2.0 * M_PI, 2) / (2 * 1),
+ pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c, src,
- qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
- struct qreg x_frac = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
-
+ struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
struct qreg sum = qir_uniform_f(c, coeff[0]);
struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
struct qreg x = x2; /* Current x^2, x^4, or x^6 */
if (i != 1)
x = qir_FMUL(c, x, x2);
- struct qreg mul = qir_FMUL(c,
+ sum = qir_FADD(c, qir_FMUL(c,
x,
- qir_uniform_f(c, coeff[i]));
- if (i == 0)
- sum = mul;
- else
- sum = qir_FADD(c, sum, mul);
+ qir_uniform_f(c, coeff[i])),
+ sum);
}
return sum;
}
qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
- return t;
+ return qir_MOV(c, t);
}
static void
qir_PACK_8_F(c, result, src, i);
}
- ntq_store_dest(c, &instr->dest.dest, 0, result);
+ ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
}
/** Handles sign-extended bitfield extracts for 16 bits. */
break;
}
+ /* Make the temporary for ntq_store_dest(). */
+ *dest = qir_MOV(c, *dest);
+
return true;
}
{
if (!instr->src[0].src.is_ssa)
goto out;
+ if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
+ goto out;
nir_alu_instr *compare =
nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
if (!compare)
out:
qir_SF(c, src[0]);
- return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
+ return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
}
static struct qreg
qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
qir_uniform_ui(c, 1)));
- return qir_SEL(c, QPU_COND_ZS,
- qir_FSUB(c, from_right, src),
- qir_FSUB(c, src, from_left));
+ return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
+ qir_FSUB(c, from_right, src),
+ qir_FSUB(c, src, from_left)));
}
static struct qreg
qir_reg(QFILE_QPU_ELEMENT, 0),
qir_uniform_ui(c, 2)));
- return qir_SEL(c, QPU_COND_ZS,
- qir_FSUB(c, from_top, src),
- qir_FSUB(c, src, from_bottom));
+ return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
+ qir_FSUB(c, from_top, src),
+ qir_FSUB(c, src, from_bottom)));
}
static void
srcs[i] = ntq_get_src(c, instr->src[i].src,
instr->src[i].swizzle[0]);
for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
- ntq_store_dest(c, &instr->dest.dest, i, srcs[i]);
+ ntq_store_dest(c, &instr->dest.dest, i,
+ qir_MOV(c, srcs[i]));
return;
}
result = qir_FMAX(c, src[0], src[1]);
break;
- case nir_op_f2i:
- case nir_op_f2u:
+ case nir_op_f2i32:
+ case nir_op_f2u32:
result = qir_FTOI(c, src[0]);
break;
- case nir_op_i2f:
- case nir_op_u2f:
+ case nir_op_i2f32:
+ case nir_op_u2f32:
result = qir_ITOF(c, src[0]);
break;
case nir_op_b2f:
case nir_op_i2b:
case nir_op_f2b:
qir_SF(c, src[0]);
- result = qir_SEL(c, QPU_COND_ZC,
- qir_uniform_ui(c, ~0),
- qir_uniform_ui(c, 0));
+ result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
+ qir_uniform_ui(c, ~0),
+ qir_uniform_ui(c, 0)));
break;
case nir_op_iadd:
break;
case nir_op_fcsel:
qir_SF(c, src[0]);
- result = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
+ result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
break;
case nir_op_frcp:
/* We have a scalar result, so the instruction should only have a
* single channel written to.
*/
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
ntq_store_dest(c, &instr->dest.dest,
ffs(instr->dest.write_mask) - 1, result);
}
}
uint32_t discard_cond = QPU_COND_ALWAYS;
- if (c->discard.file != QFILE_NULL) {
+ if (c->s->info.fs.uses_discard) {
qir_SF(c, c->discard);
discard_cond = QPU_COND_ZS;
}
struct vc4_varying_slot *fs_inputs,
uint32_t num_fs_inputs)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+ struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
static void
emit_coord_end(struct vc4_compile *c)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+ struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
progress = false;
NIR_PASS_V(s, nir_lower_vars_to_ssa);
- NIR_PASS_V(s, nir_lower_alu_to_scalar);
- NIR_PASS_V(s, nir_lower_phis_to_scalar);
-
+ NIR_PASS(progress, s, nir_lower_alu_to_scalar);
+ NIR_PASS(progress, s, nir_lower_phis_to_scalar);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
NIR_PASS(progress, s, nir_opt_cse);
- NIR_PASS(progress, s, nir_opt_peephole_select);
+ NIR_PASS(progress, s, nir_opt_peephole_select, 8);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
NIR_PASS(progress, s, nir_opt_undef);
+ NIR_PASS(progress, s, nir_opt_loop_unroll,
+ nir_var_shader_in |
+ nir_var_shader_out |
+ nir_var_local);
} while (progress);
}
ntq_setup_uniforms(struct vc4_compile *c)
{
nir_foreach_variable(var, &c->s->uniforms) {
- uint32_t vec4_count = st_glsl_type_size(var->type);
+ uint32_t vec4_count = uniforms_type_size(var->type);
unsigned vec4_size = 4 * sizeof(float);
declare_uniform_range(c, var->data.driver_location * vec4_size,
qregs[i] = qir_uniform_ui(c, 0);
}
+static void
+ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+ assert(nir_src_as_const_value(instr->src[0])->u32[0] == 0);
+
+ /* Reads of the per-sample color need to be done in
+ * order.
+ */
+ int sample_index = (nir_intrinsic_base(instr) -
+ VC4_NIR_TLB_COLOR_READ_INPUT);
+ for (int i = 0; i <= sample_index; i++) {
+ if (c->color_reads[i].file == QFILE_NULL) {
+ c->color_reads[i] =
+ qir_TLB_COLOR_READ(c);
+ }
+ }
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->color_reads[sample_index]));
+}
+
+static void
+ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+ assert(instr->num_components == 1);
+
+ nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+ assert(const_offset && "vc4 doesn't support indirect inputs");
+
+ if (c->stage == QSTAGE_FRAG &&
+ nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
+ ntq_emit_color_read(c, instr);
+ return;
+ }
+
+ uint32_t offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ int comp = nir_intrinsic_component(instr);
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->inputs[offset * 4 + comp]));
+}
+
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
}
break;
+ case nir_intrinsic_load_ubo:
+ assert(instr->num_components == 1);
+ ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
+ break;
+
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < instr->num_components; i++) {
ntq_store_dest(c, &instr->dest, i,
break;
case nir_intrinsic_load_input:
- assert(instr->num_components == 1);
- const_offset = nir_src_as_const_value(instr->src[0]);
- assert(const_offset && "vc4 doesn't support indirect inputs");
- if (c->stage == QSTAGE_FRAG &&
- nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
- assert(const_offset->u32[0] == 0);
- /* Reads of the per-sample color need to be done in
- * order.
- */
- int sample_index = (nir_intrinsic_base(instr) -
- VC4_NIR_TLB_COLOR_READ_INPUT);
- for (int i = 0; i <= sample_index; i++) {
- if (c->color_reads[i].file == QFILE_NULL) {
- c->color_reads[i] =
- qir_TLB_COLOR_READ(c);
- }
- }
- ntq_store_dest(c, &instr->dest, 0,
- c->color_reads[sample_index]);
- } else {
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
- int comp = nir_intrinsic_component(instr);
- ntq_store_dest(c, &instr->dest, 0,
- c->inputs[offset * 4 + comp]);
- }
+ ntq_emit_load_input(c, instr);
break;
case nir_intrinsic_store_output:
break;
case nir_intrinsic_discard:
- c->discard = qir_uniform_ui(c, ~0);
+ if (c->execute.file != QFILE_NULL) {
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->discard,
+ qir_uniform_ui(c, ~0));
+ } else {
+ qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
+ }
break;
- case nir_intrinsic_discard_if:
- if (c->discard.file == QFILE_NULL)
- c->discard = qir_uniform_ui(c, 0);
- c->discard = qir_OR(c, c->discard,
+ case nir_intrinsic_discard_if: {
+ /* true (~0) if we're discarding */
+ struct qreg cond = ntq_get_src(c, instr->src[0], 0);
+
+ if (c->execute.file != QFILE_NULL) {
+ /* execute == 0 means the channel is active. Invert
+ * the condition so that we can use zero as "executing
+ * and discarding."
+ */
+ qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
+ qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
+ } else {
+ qir_OR_dest(c, c->discard, c->discard,
ntq_get_src(c, instr->src[0], 0));
+ }
+
break;
+ }
default:
fprintf(stderr, "Unknown intrinsic: ");
return;
}
- nir_cf_node *nir_first_else_node = nir_if_first_else_node(if_stmt);
- nir_cf_node *nir_last_else_node = nir_if_last_else_node(if_stmt);
- nir_block *nir_else_block = nir_cf_node_as_block(nir_first_else_node);
+ nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
bool empty_else_block =
- (nir_first_else_node == nir_last_else_node &&
+ (nir_else_block == nir_if_last_else_block(if_stmt) &&
exec_list_is_empty(&nir_else_block->instr_list));
struct qblock *then_block = qir_new_block(c);
qir_link_blocks(c->cur_block, after_block);
qir_set_emit_block(c, after_block);
- if (was_top_level)
+ if (was_top_level) {
c->execute = c->undef;
- else
+ c->last_top_block = c->cur_block;
+ } else {
ntq_activate_execute_for_block(c);
-
+ }
}
static void
ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
{
+ struct qblock *jump_block;
switch (jump->type) {
case nir_jump_break:
- qir_SF(c, c->execute);
- qir_MOV_cond(c, QPU_COND_ZS, c->execute,
- qir_uniform_ui(c, c->loop_break_block->index));
+ jump_block = c->loop_break_block;
break;
-
case nir_jump_continue:
- qir_SF(c, c->execute);
- qir_MOV_cond(c, QPU_COND_ZS, c->execute,
- qir_uniform_ui(c, c->loop_cont_block->index));
+ jump_block = c->loop_cont_block;
break;
-
- case nir_jump_return:
- unreachable("All returns shouold be lowered\n");
+ default:
+ unreachable("Unsupported jump type\n");
}
+
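+ /* Channels taking the jump get their execute value set to the target
+ * block's index, deactivating them until emission reaches that block.
+ */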
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, jump_block->index));
+
+ /* Jump to the destination block if everyone has taken the jump. */
+ qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
+ qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
+ struct qblock *new_block = qir_new_block(c);
+ qir_link_blocks(c->cur_block, jump_block);
+ qir_link_blocks(c->cur_block, new_block);
+ qir_set_emit_block(c, new_block);
}
static void
qir_link_blocks(c->cur_block, c->loop_break_block);
qir_set_emit_block(c, c->loop_break_block);
- if (was_top_level)
+ if (was_top_level) {
c->execute = c->undef;
- else
+ c->last_top_block = c->cur_block;
+ } else {
ntq_activate_execute_for_block(c);
+ }
c->loop_break_block = save_loop_break_block;
c->loop_cont_block = save_loop_cont_block;
static void
nir_to_qir(struct vc4_compile *c)
{
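+ /* c->discard holds ~0 in channels that have executed a discard; start
+ * it at 0 so the conditional updates in ntq_emit_intrinsic() have a
+ * defined value to build on.
+ */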
+ if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
+ c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
+
ntq_setup_inputs(c);
ntq_setup_outputs(c);
ntq_setup_uniforms(c);
}
static const nir_shader_compiler_options nir_options = {
+ .lower_all_io_to_temps = true,
.lower_extract_byte = true,
.lower_extract_word = true,
+ .lower_fdiv = true,
.lower_ffma = true,
.lower_flrp32 = true,
.lower_fpow = true,
.lower_fsat = true,
.lower_fsqrt = true,
+ .lower_ldexp = true,
.lower_negate = true,
.native_integers = true,
+ .max_unroll_iterations = 32,
};
const void *
vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
- enum pipe_shader_ir ir, unsigned shader)
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader)
{
return &nir_options;
}
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
- struct vc4_key *key)
+ struct vc4_key *key, bool fs_threaded)
{
struct vc4_compile *c = qir_compile_init();
c->program_id = key->shader_state->program_id;
c->variant_id =
p_atomic_inc_return(&key->shader_state->compiled_variant_count);
+ c->fs_threaded = fs_threaded;
c->key = key;
switch (stage) {
c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
- if (stage == QSTAGE_FRAG)
+ if (stage == QSTAGE_FRAG) {
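+ /* Lower the fixed-function alpha test to a conditional discard
+ * in NIR, taking sample alpha-to-one (under MSAA) into account.
+ */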
+ if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
+ NIR_PASS_V(c->s, nir_lower_alpha_test,
+ c->fs_key->alpha_test_func,
+ c->fs_key->sample_alpha_to_one &&
+ c->fs_key->msaa);
+ }
NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
+ }
struct nir_lower_tex_options tex_options = {
/* We would need to implement txs, but we don't want the
switch (stage) {
case QSTAGE_FRAG:
+ /* FS threading requires that the thread execute
+ * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
+ * (with no other THRSW afterwards, obviously). If we didn't
+ * fetch a texture at a top level block, this wouldn't be
+ * true.
+ */
+ if (c->fs_threaded && !c->last_thrsw_at_top_level) {
+ c->failed = true;
+ return c;
+ }
+
emit_frag_end(c);
break;
case QSTAGE_VERT:
* creation.
*/
s = cso->ir.nir;
- } else {
+
+ NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
+ uniforms_type_size,
+ (nir_lower_io_options)0);
+ } else {
assert(cso->type == PIPE_SHADER_IR_TGSI);
if (vc4_debug & VC4_DEBUG_TGSI) {
s = tgsi_to_nir(cso->tokens, &nir_options);
}
+ NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform,
+ type_size,
+ (nir_lower_io_options)0);
+
NIR_PASS_V(s, nir_opt_global_to_local);
- NIR_PASS_V(s, nir_convert_to_ssa);
+ NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
NIR_PASS_V(s, nir_lower_load_const_to_scalar);
if (vc4_debug & VC4_DEBUG_NIR) {
fprintf(stderr, "%s prog %d NIR:\n",
- gl_shader_stage_name(s->stage),
+ gl_shader_stage_name(s->info.stage),
so->program_id);
nir_print_shader(s, stderr);
fprintf(stderr, "\n");
memset(input_live, 0, sizeof(input_live));
qir_for_each_inst_inorder(inst, c) {
- for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
+ for (int i = 0; i < qir_get_nsrc(inst); i++) {
if (inst->src[i].file == QFILE_VARY)
input_live[inst->src[i].index] = true;
}
{
struct hash_table *ht;
uint32_t key_size;
+ bool try_threading;
+
if (stage == QSTAGE_FRAG) {
ht = vc4->fs_cache;
key_size = sizeof(struct vc4_fs_key);
+ try_threading = vc4->screen->has_threaded_fs;
} else {
ht = vc4->vs_cache;
key_size = sizeof(struct vc4_vs_key);
+ try_threading = false;
}
struct vc4_compiled_shader *shader;
if (entry)
return entry->data;
- struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
+ struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
+ /* If the FS failed to compile threaded, fall back to single threaded. */
+ if (try_threading && c->failed) {
+ qir_compile_destroy(c);
+ c = vc4_shader_ntq(vc4, stage, key, false);
+ }
+
shader = rzalloc(NULL, struct vc4_compiled_shader);
shader->program_id = vc4->next_compiled_program_id++;
}
}
- copy_uniform_state_to_shader(shader, c);
- shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
- c->qpu_inst_count * sizeof(uint64_t));
+ shader->failed = c->failed;
+ if (c->failed) {
+ shader->failed = true;
+ } else {
+ copy_uniform_state_to_shader(shader, c);
+ shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
+ c->qpu_inst_count *
+ sizeof(uint64_t));
+ }
+
+ shader->fs_threaded = c->fs_threaded;
/* Copy the compiler UBO range state to the compiled shader, dropping
* out arrays that were never referenced by an indirect load.
}
}
+ if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
+ fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id,
+ 1 + shader->fs_threaded);
+ }
+
qir_compile_destroy(c);
struct vc4_key *dup_key;
- dup_key = ralloc_size(shader, key_size);
+ dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
memcpy(dup_key, key, key_size);
_mesa_hash_table_insert(ht, dup_key, shader);
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
+ struct vc4_job *job = vc4->job;
struct vc4_fs_key local_key;
struct vc4_fs_key *key = &local_key;
VC4_DIRTY_RASTERIZER |
VC4_DIRTY_SAMPLE_MASK |
VC4_DIRTY_FRAGTEX |
- VC4_DIRTY_UNCOMPILED_FS))) {
+ VC4_DIRTY_UNCOMPILED_FS |
+ VC4_DIRTY_UBO_1_SIZE))) {
return;
}
} else {
key->logicop_func = PIPE_LOGICOP_COPY;
}
- if (vc4->msaa) {
+ if (job->msaa) {
key->msaa = vc4->rasterizer->base.multisample;
- key->sample_coverage = (vc4->rasterizer->base.multisample &&
- vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
+ key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
key->sample_alpha_to_one = vc4->blend->alpha_to_one;
}
key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
key->depth_enabled = (vc4->zsa->base.depth.enabled ||
key->stencil_enabled);
- if (vc4->zsa->base.alpha.enabled) {
- key->alpha_test = true;
+ if (vc4->zsa->base.alpha.enabled)
key->alpha_test_func = vc4->zsa->base.alpha.func;
- }
+ else
+ key->alpha_test_func = COMPARE_FUNC_ALWAYS;
if (key->is_points) {
key->point_sprite_mask =
PIPE_SPRITE_COORD_UPPER_LEFT);
}
+ key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
key->light_twoside = vc4->rasterizer->base.light_twoside;
struct vc4_compiled_shader *old_fs = vc4->prog.fs;
vc4->dirty |= VC4_DIRTY_COMPILED_FS;
if (vc4->rasterizer->base.flatshade &&
- old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
+ (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
}
- if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
+ if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
}
}
-void
+bool
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
vc4_update_compiled_fs(vc4, prim_mode);
vc4_update_compiled_vs(vc4, prim_mode);
+
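+ /* Report whether every stage compiled, so the caller can skip the
+ * draw when a shader failed to compile.
+ */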
+ return !(vc4->prog.cs->failed ||
+ vc4->prog.vs->failed ||
+ vc4->prog.fs->failed);
}
static uint32_t
static void
delete_from_cache_if_matches(struct hash_table *ht,
+ struct vc4_compiled_shader **last_compile,
struct hash_entry *entry,
struct vc4_uncompiled_shader *so)
{
struct vc4_compiled_shader *shader = entry->data;
_mesa_hash_table_remove(ht, entry);
vc4_bo_unreference(&shader->bo);
+
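+ /* Clear the context's cached compiled-shader pointer if it points at
+ * the shader being destroyed.
+ */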
+ if (shader == *last_compile)
+ *last_compile = NULL;
+
ralloc_free(shader);
}
}
struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_uncompiled_shader *so = hwcso;
- struct hash_entry *entry;
- hash_table_foreach(vc4->fs_cache, entry)
- delete_from_cache_if_matches(vc4->fs_cache, entry, so);
- hash_table_foreach(vc4->vs_cache, entry)
- delete_from_cache_if_matches(vc4->vs_cache, entry, so);
+ hash_table_foreach(vc4->fs_cache, entry) {
+ delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
+ entry, so);
+ }
+ hash_table_foreach(vc4->vs_cache, entry) {
+ delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
+ entry, so);
+ }
ralloc_free(so->base.ir.nir);
free(so);