#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
+#include "compiler/nir_types.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu.h"
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
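+/* I/O slot count callback for nir_lower_io: shader inputs and outputs are
+ * counted in whole attribute (vec4) slots.
+ */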
+static int
+type_size(const struct glsl_type *type)
+{
+ return glsl_count_attribute_slots(type, false);
+}
+
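+/* Uniform size callback for nir_lower_io: uniforms are counted in vec4s,
+ * matching the vec4-based uniform ranges set up in ntq_setup_uniforms().
+ */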
+static int
+uniforms_type_size(const struct glsl_type *type)
+{
+ return st_glsl_storage_type_size(type, false);
+}
+
static void
resize_qreg_array(struct vc4_compile *c,
struct qreg **regs,
/* Clamp to [0, array size). Note that MIN/MAX are signed. */
indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
- indirect_offset = qir_MIN(c, indirect_offset,
- qir_uniform_ui(c, (range->dst_offset +
- range->size - 4)));
+ indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
+ qir_uniform_ui(c, (range->dst_offset +
+ range->size - 4)));
+
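+ /* The TMU in direct mode fetches from a raw address, so add the UBO's
+ * base address (a uniform) to the clamped byte offset.
+ */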
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ indirect_offset,
+ qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
+
+ c->num_texture_samples++;
+
+ ntq_emit_thrsw(c);
+
+ return qir_TEX_RESULT(c);
+}
+
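+/* Loads a value from the fragment shader's UBO (gallium constant buffer 1)
+ * using a direct TMU fetch from the UBO's base address plus a clamped byte
+ * offset.
+ */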
+static struct qreg
+vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
+{
+ nir_const_value *buffer_index =
+ nir_src_as_const_value(intr->src[0]);
+ assert(buffer_index->u32[0] == 1);
+ assert(c->stage == QSTAGE_FRAG);
+
+ struct qreg offset = ntq_get_src(c, intr->src[1], 0);
+
+ /* Clamp to [0, array size). Note that MIN/MAX are signed. */
+ offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
+ offset = qir_MIN_NOIMM(c, offset,
+ qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
+
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ offset,
+ qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0]));
- qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
c->num_texture_samples++;
ntq_emit_thrsw(c);
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
unsigned src)
{
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
unsigned chan = ffs(instr->dest.write_mask) - 1;
struct qreg r = ntq_get_src(c, instr->src[src].src,
instr->src[src].swizzle[chan]);
/* Perform the clamping required by kernel validation. */
addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
- addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));
+ addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
- qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
ntq_emit_thrsw(c);
lod = ntq_get_src(c, instr->src[i].src, 0);
is_txl = true;
break;
- case nir_tex_src_comparitor:
+ case nir_tex_src_comparator:
compare = ntq_get_src(c, instr->src[i].src, 0);
break;
default:
unit | (is_txl << 16));
}
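+ /* Write the TMU coordinate registers, attaching each texture config
+ * uniform as an extra source on its TMU write so the uniform is emitted
+ * together with that instruction.
+ */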
+ struct qinst *tmu;
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
- qir_TEX_R(c, r, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
} else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
- qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
- texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
+ qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
+ unit));
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
}
if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
t = qir_SAT(c, t);
}
- qir_TEX_T(c, t, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
- if (is_txl || is_txb)
- qir_TEX_B(c, lod, texture_u[next_texture_u++]);
+ if (is_txl || is_txb) {
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
+ tmu->src[qir_get_tex_uniform_src(tmu)] =
+ texture_u[next_texture_u++];
+ }
- qir_TEX_S(c, s, texture_u[next_texture_u++]);
+ tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
+ tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
c->num_texture_samples++;
struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
struct qreg diff = qir_FSUB(c, src, trunc);
qir_SF(c, diff);
- return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
- diff));
+
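+ /* For a negative src the fractional part computed above is negative, so
+ * conditionally add 1.0 to bring the result back into [0, 1).
+ */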
+ qir_FADD_dest(c, diff,
+ diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+ return qir_MOV(c, diff);
}
/**
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
- struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+ struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
/* This will be < 0 if we truncated and the truncation was of a value
* that was < 0 in the first place.
*/
- qir_SF(c, qir_FSUB(c, src, trunc));
+ qir_SF(c, qir_FSUB(c, src, result));
+
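+ /* FTOI truncated toward zero, so for negative non-integer inputs the
+ * flags set above are negative; subtract 1.0 to round toward -infinity.
+ */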
+ struct qinst *sub = qir_FSUB_dest(c, result,
+ result, qir_uniform_f(c, 1.0));
+ sub->cond = QPU_COND_NS;
- return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
- qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
- trunc));
+ return qir_MOV(c, result);
}
/**
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
- struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+ struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
/* This will be < 0 if we truncated and the truncation was of a value
* that was > 0 in the first place.
*/
- qir_SF(c, qir_FSUB(c, trunc, src));
+ qir_SF(c, qir_FSUB(c, result, src));
+
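+ /* FTOI truncated toward zero, so for positive non-integer inputs the
+ * flags set above are negative; add 1.0 to round toward +infinity.
+ */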
+ qir_FADD_dest(c, result,
+ result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
- return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
- qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
- trunc));
+ return qir_MOV(c, result);
+}
+
+static struct qreg
+ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
+{
+ /* Since we're using a Taylor approximation, we want to have a small
+ * number of coefficients and take advantage of sin/cos repeating
+ * every 2pi. We keep our x as close to 0 as we can, since the series
+ * will be less accurate as |x| increases. (Also, be careful about
+ * using sin/cos identities to shift the input x value, because getting
+ * accurate values for x == 0 is very important for SDL rendering.)
+ */
+ struct qreg scaled_x =
+ qir_FMUL(c, x,
+ qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
+ /* Note: FTOI truncates toward 0. */
+ struct qreg x_frac = qir_FSUB(c, scaled_x,
+ qir_ITOF(c, qir_FTOI(c, scaled_x)));
+ /* Map [0.5, 1] to [-0.5, 0] */
+ qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
+ /* Map [-1, -0.5] to [0, 0.5] */
+ qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+ return x_frac;
}
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
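+ /* Taylor series for sin(2*pi*x) around x == 0; the range reduction in
+ * ntq_shrink_sincos_input_range() keeps |x| <= 0.5 so the series stays
+ * accurate.
+ */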
float coeff[] = {
- -2.0 * M_PI,
- pow(2.0 * M_PI, 3) / (3 * 2 * 1),
- -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 2.0 * M_PI,
+ -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
+ pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c,
- src,
- qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
-
- struct qreg x = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
+ struct qreg x = ntq_shrink_sincos_input_range(c, src);
struct qreg x2 = qir_FMUL(c, x, x);
struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
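+ /* Taylor series for cos(2*pi*x) around x == 0, with the same range
+ * reduction as ntq_fsin().
+ */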
float coeff[] = {
- -1.0f,
- pow(2.0 * M_PI, 2) / (2 * 1),
- -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 1.0f,
+ -pow(2.0 * M_PI, 2) / (2 * 1),
+ pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c, src,
- qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
- struct qreg x_frac = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
-
+ struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
struct qreg sum = qir_uniform_f(c, coeff[0]);
struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
struct qreg x = x2; /* Current x^2, x^4, or x^6 */
if (i != 1)
x = qir_FMUL(c, x, x2);
- struct qreg mul = qir_FMUL(c,
+ sum = qir_FADD(c, qir_FMUL(c,
x,
- qir_uniform_f(c, coeff[i]));
- if (i == 0)
- sum = mul;
- else
- sum = qir_FADD(c, sum, mul);
+ qir_uniform_f(c, coeff[i])),
+ sum);
}
return sum;
}
enum qpu_cond cond;
switch (compare_instr->op) {
- case nir_op_feq:
- case nir_op_ieq:
+ case nir_op_feq32:
+ case nir_op_ieq32:
case nir_op_seq:
cond = QPU_COND_ZS;
break;
- case nir_op_fne:
- case nir_op_ine:
+ case nir_op_fne32:
+ case nir_op_ine32:
case nir_op_sne:
cond = QPU_COND_ZC;
break;
- case nir_op_fge:
- case nir_op_ige:
- case nir_op_uge:
+ case nir_op_fge32:
+ case nir_op_ige32:
+ case nir_op_uge32:
case nir_op_sge:
cond = QPU_COND_NC;
break;
- case nir_op_flt:
- case nir_op_ilt:
+ case nir_op_flt32:
+ case nir_op_ilt32:
case nir_op_slt:
cond = QPU_COND_NS;
break;
qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
break;
- case nir_op_bcsel:
+ case nir_op_b32csel:
*dest = qir_SEL(c, cond,
ntq_get_alu_src(c, sel_instr, 1),
ntq_get_alu_src(c, sel_instr, 2));
result = qir_FMAX(c, src[0], src[1]);
break;
- case nir_op_f2i:
- case nir_op_f2u:
+ case nir_op_f2i32:
+ case nir_op_f2u32:
result = qir_FTOI(c, src[0]);
break;
- case nir_op_i2f:
- case nir_op_u2f:
+ case nir_op_i2f32:
+ case nir_op_u2f32:
result = qir_ITOF(c, src[0]);
break;
- case nir_op_b2f:
+ case nir_op_b2f32:
result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
break;
- case nir_op_b2i:
+ case nir_op_b2i32:
result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
break;
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
qir_SF(c, src[0]);
result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
qir_uniform_ui(c, ~0),
case nir_op_sne:
case nir_op_sge:
case nir_op_slt:
- case nir_op_feq:
- case nir_op_fne:
- case nir_op_fge:
- case nir_op_flt:
- case nir_op_ieq:
- case nir_op_ine:
- case nir_op_ige:
- case nir_op_uge:
- case nir_op_ilt:
+ case nir_op_feq32:
+ case nir_op_fne32:
+ case nir_op_fge32:
+ case nir_op_flt32:
+ case nir_op_ieq32:
+ case nir_op_ine32:
+ case nir_op_ige32:
+ case nir_op_uge32:
+ case nir_op_ilt32:
if (!ntq_emit_comparison(c, &result, instr, instr)) {
fprintf(stderr, "Bad comparison instruction\n");
}
break;
- case nir_op_bcsel:
+ case nir_op_b32csel:
result = ntq_emit_bcsel(c, instr, src);
break;
case nir_op_fcsel:
/* We have a scalar result, so the instruction should only have a
* single channel written to.
*/
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
ntq_store_dest(c, &instr->dest.dest,
ffs(instr->dest.write_mask) - 1, result);
}
}
uint32_t discard_cond = QPU_COND_ALWAYS;
- if (c->s->info->fs.uses_discard) {
+ if (c->s->info.fs.uses_discard) {
qir_SF(c, c->discard);
discard_cond = QPU_COND_ZS;
}
static void
emit_coord_end(struct vc4_compile *c)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+ struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
NIR_PASS(progress, s, nir_opt_cse);
- NIR_PASS(progress, s, nir_opt_peephole_select, 8);
+ NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
NIR_PASS(progress, s, nir_opt_undef);
+ NIR_PASS(progress, s, nir_opt_loop_unroll,
+ nir_var_shader_in |
+ nir_var_shader_out |
+ nir_var_function_temp);
} while (progress);
}
ntq_setup_uniforms(struct vc4_compile *c)
{
nir_foreach_variable(var, &c->s->uniforms) {
- uint32_t vec4_count = st_glsl_type_size(var->type);
+ uint32_t vec4_count = uniforms_type_size(var->type);
unsigned vec4_size = 4 * sizeof(float);
declare_uniform_range(c, var->data.driver_location * vec4_size,
qregs[i] = qir_uniform_ui(c, 0);
}
+static void
+ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+ assert(nir_src_as_const_value(instr->src[0])->u32[0] == 0);
+
+ /* Reads of the per-sample color need to be done in
+ * order.
+ */
+ int sample_index = (nir_intrinsic_base(instr) -
+ VC4_NIR_TLB_COLOR_READ_INPUT);
+ for (int i = 0; i <= sample_index; i++) {
+ if (c->color_reads[i].file == QFILE_NULL) {
+ c->color_reads[i] =
+ qir_TLB_COLOR_READ(c);
+ }
+ }
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->color_reads[sample_index]));
+}
+
+static void
+ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+ assert(instr->num_components == 1);
+
+ nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+ assert(const_offset && "vc4 doesn't support indirect inputs");
+
+ if (c->stage == QSTAGE_FRAG &&
+ nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
+ ntq_emit_color_read(c, instr);
+ return;
+ }
+
+ uint32_t offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ int comp = nir_intrinsic_component(instr);
+ ntq_store_dest(c, &instr->dest, 0,
+ qir_MOV(c, c->inputs[offset * 4 + comp]));
+}
+
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
}
break;
+ case nir_intrinsic_load_ubo:
+ assert(instr->num_components == 1);
+ ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
+ break;
+
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < instr->num_components; i++) {
ntq_store_dest(c, &instr->dest, i,
break;
case nir_intrinsic_load_input:
- assert(instr->num_components == 1);
- const_offset = nir_src_as_const_value(instr->src[0]);
- assert(const_offset && "vc4 doesn't support indirect inputs");
- if (c->stage == QSTAGE_FRAG &&
- nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
- assert(const_offset->u32[0] == 0);
- /* Reads of the per-sample color need to be done in
- * order.
- */
- int sample_index = (nir_intrinsic_base(instr) -
- VC4_NIR_TLB_COLOR_READ_INPUT);
- for (int i = 0; i <= sample_index; i++) {
- if (c->color_reads[i].file == QFILE_NULL) {
- c->color_reads[i] =
- qir_TLB_COLOR_READ(c);
- }
- }
- ntq_store_dest(c, &instr->dest, 0,
- qir_MOV(c, c->color_reads[sample_index]));
- } else {
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
- int comp = nir_intrinsic_component(instr);
- ntq_store_dest(c, &instr->dest, 0,
- qir_MOV(c, c->inputs[offset * 4 + comp]));
- }
+ ntq_emit_load_input(c, instr);
break;
case nir_intrinsic_store_output:
qir_link_blocks(c->cur_block, after_block);
qir_set_emit_block(c, after_block);
- if (was_top_level)
+ if (was_top_level) {
c->execute = c->undef;
- else
+ c->last_top_block = c->cur_block;
+ } else {
ntq_activate_execute_for_block(c);
-
+ }
}
static void
ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
{
+ struct qblock *jump_block;
switch (jump->type) {
case nir_jump_break:
- qir_SF(c, c->execute);
- qir_MOV_cond(c, QPU_COND_ZS, c->execute,
- qir_uniform_ui(c, c->loop_break_block->index));
+ jump_block = c->loop_break_block;
break;
-
case nir_jump_continue:
- qir_SF(c, c->execute);
- qir_MOV_cond(c, QPU_COND_ZS, c->execute,
- qir_uniform_ui(c, c->loop_cont_block->index));
+ jump_block = c->loop_cont_block;
break;
-
- case nir_jump_return:
- unreachable("All returns shouold be lowered\n");
+ default:
+ unreachable("Unsupported jump type\n");
}
+
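+ /* Mark the currently active channels (execute == 0) as waiting for the
+ * jump target block by storing its index in their execute value.
+ */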
+ qir_SF(c, c->execute);
+ qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+ qir_uniform_ui(c, jump_block->index));
+
+ /* Jump to the destination block if everyone has taken the jump. */
+ qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
+ qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
+ struct qblock *new_block = qir_new_block(c);
+ qir_link_blocks(c->cur_block, jump_block);
+ qir_link_blocks(c->cur_block, new_block);
+ qir_set_emit_block(c, new_block);
}
static void
qir_link_blocks(c->cur_block, c->loop_break_block);
qir_set_emit_block(c, c->loop_break_block);
- if (was_top_level)
+ if (was_top_level) {
c->execute = c->undef;
- else
+ c->last_top_block = c->cur_block;
+ } else {
ntq_activate_execute_for_block(c);
+ }
c->loop_break_block = save_loop_break_block;
c->loop_cont_block = save_loop_cont_block;
static void
nir_to_qir(struct vc4_compile *c)
{
- if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
+ if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
ntq_setup_inputs(c);
}
static const nir_shader_compiler_options nir_options = {
+ .lower_all_io_to_temps = true,
.lower_extract_byte = true,
.lower_extract_word = true,
+ .lower_fdiv = true,
.lower_ffma = true,
.lower_flrp32 = true,
.lower_fpow = true,
.lower_fsat = true,
.lower_fsqrt = true,
+ .lower_ldexp = true,
.lower_negate = true,
.native_integers = true,
+ .max_unroll_iterations = 32,
};
const void *
vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
- enum pipe_shader_ir ir, unsigned shader)
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader)
{
return &nir_options;
}
c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
- if (stage == QSTAGE_FRAG)
+ if (stage == QSTAGE_FRAG) {
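+ /* Lower the GL alpha test to a conditional discard in NIR before
+ * lowering blending.
+ */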
+ if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
+ NIR_PASS_V(c->s, nir_lower_alpha_test,
+ c->fs_key->alpha_test_func,
+ c->fs_key->sample_alpha_to_one &&
+ c->fs_key->msaa);
+ }
NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
+ }
struct nir_lower_tex_options tex_options = {
/* We would need to implement txs, but we don't want the
if (stage == QSTAGE_FRAG) {
NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
} else {
- NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_clip_vs,
+ c->key->ucp_enables, false);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
vc4_optimize_nir(c->s);
+ NIR_PASS_V(c->s, nir_lower_bool_to_int32);
+
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
if (vc4_debug & VC4_DEBUG_SHADERDB) {
* creation.
*/
s = cso->ir.nir;
- } else {
+
+ NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
+ uniforms_type_size,
+ (nir_lower_io_options)0);
+ } else {
assert(cso->type == PIPE_SHADER_IR_TGSI);
if (vc4_debug & VC4_DEBUG_TGSI) {
tgsi_dump(cso->tokens, 0);
fprintf(stderr, "\n");
}
- s = tgsi_to_nir(cso->tokens, &nir_options);
+ s = tgsi_to_nir(cso->tokens, pctx->screen);
}
+ NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform,
+ type_size,
+ (nir_lower_io_options)0);
+
NIR_PASS_V(s, nir_opt_global_to_local);
- NIR_PASS_V(s, nir_convert_to_ssa);
+ NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
NIR_PASS_V(s, nir_lower_load_const_to_scalar);
vc4_optimize_nir(s);
- NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
+ NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
/* Garbage collect dead instructions */
nir_sweep(s);
if (vc4_debug & VC4_DEBUG_NIR) {
fprintf(stderr, "%s prog %d NIR:\n",
- gl_shader_stage_name(s->stage),
+ gl_shader_stage_name(s->info.stage),
so->program_id);
nir_print_shader(s, stderr);
fprintf(stderr, "\n");
/* Note: the temporary clone in c->s has been freed. */
nir_shader *orig_shader = key->shader_state->base.ir.nir;
- if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
+ if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
shader->disable_early_z = true;
} else {
shader->num_inputs = c->num_inputs;
}
}
+ if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
+ fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id,
+ 1 + shader->fs_threaded);
+ }
+
qir_compile_destroy(c);
struct vc4_key *dup_key;
VC4_DIRTY_RASTERIZER |
VC4_DIRTY_SAMPLE_MASK |
VC4_DIRTY_FRAGTEX |
- VC4_DIRTY_UNCOMPILED_FS))) {
+ VC4_DIRTY_UNCOMPILED_FS |
+ VC4_DIRTY_UBO_1_SIZE))) {
return;
}
}
if (job->msaa) {
key->msaa = vc4->rasterizer->base.multisample;
- key->sample_coverage = (vc4->rasterizer->base.multisample &&
- vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
+ key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
key->sample_alpha_to_one = vc4->blend->alpha_to_one;
}
key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
key->depth_enabled = (vc4->zsa->base.depth.enabled ||
key->stencil_enabled);
- if (vc4->zsa->base.alpha.enabled) {
- key->alpha_test = true;
+ if (vc4->zsa->base.alpha.enabled)
key->alpha_test_func = vc4->zsa->base.alpha.func;
- }
+ else
+ key->alpha_test_func = COMPARE_FUNC_ALWAYS;
if (key->is_points) {
key->point_sprite_mask =
PIPE_SPRITE_COORD_UPPER_LEFT);
}
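+ /* Record the size of constant buffer 1 in the key so UBO loads can be
+ * clamped to the buffer's bounds at compile time.
+ */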
+ key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
key->light_twoside = vc4->rasterizer->base.light_twoside;
struct vc4_compiled_shader *old_fs = vc4->prog.fs;
vc4->dirty |= VC4_DIRTY_COMPILED_FS;
if (vc4->rasterizer->base.flatshade &&
- old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
+ (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
}
- if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
+ if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
static void
delete_from_cache_if_matches(struct hash_table *ht,
+ struct vc4_compiled_shader **last_compile,
struct hash_entry *entry,
struct vc4_uncompiled_shader *so)
{
struct vc4_compiled_shader *shader = entry->data;
_mesa_hash_table_remove(ht, entry);
vc4_bo_unreference(&shader->bo);
+
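+ /* If we just freed the currently-bound compiled shader, clear the
+ * context's pointer to it so it isn't used after free.
+ */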
+ if (shader == *last_compile)
+ *last_compile = NULL;
+
ralloc_free(shader);
}
}
struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_uncompiled_shader *so = hwcso;
- struct hash_entry *entry;
- hash_table_foreach(vc4->fs_cache, entry)
- delete_from_cache_if_matches(vc4->fs_cache, entry, so);
- hash_table_foreach(vc4->vs_cache, entry)
- delete_from_cache_if_matches(vc4->vs_cache, entry, so);
+ hash_table_foreach(vc4->fs_cache, entry) {
+ delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
+ entry, so);
+ }
+ hash_table_foreach(vc4->vs_cache, entry) {
+ delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
+ entry, so);
+ }
ralloc_free(so->base.ir.nir);
free(so);
{
struct vc4_context *vc4 = vc4_context(pctx);
- struct hash_entry *entry;
hash_table_foreach(vc4->fs_cache, entry) {
struct vc4_compiled_shader *shader = entry->data;
vc4_bo_unreference(&shader->bo);