#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
+#include "mesa/state_tracker/st_glsl_types.h"
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
return glsl_count_attribute_slots(type, false);
}
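+
+/* Sizing callback for lowering uniform variables: counts uniform storage
+ * slots via the state-tracker helper, rather than the attribute slots
+ * that type_size() above counts.
+ */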
+static int
+uniforms_type_size(const struct glsl_type *type)
+{
+ return st_glsl_storage_type_size(type, false);
+}
+
static void
resize_qreg_array(struct vc4_compile *c,
struct qreg **regs,
return qir_TEX_RESULT(c);
}
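+
+/* Loads one 32-bit scalar from UBO index 1, using the TMU's direct
+ * (general memory) addressing mode.
+ */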
+static struct qreg
+vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
+{
+ nir_const_value *buffer_index =
+ nir_src_as_const_value(intr->src[0]);
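+ /* Gallium's constant buffer 0 holds the GL uniforms and is handled
+ * through the uniform file, so the only UBO we expect loads from is
+ * buffer index 1.
+ */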
+ assert(buffer_index != NULL);
+ assert(buffer_index->u32[0] == 1);
+ assert(c->stage == QSTAGE_FRAG);
+
+ struct qreg offset = ntq_get_src(c, intr->src[1], 0);
+
+ /* Clamp the offset to [0, array size - 4] so the whole 32-bit word
+ * stays in bounds. Note that MIN/MAX are signed.
+ */
+ offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
+ offset = qir_MIN_NOIMM(c, offset,
+ qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
+
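+ /* Writing the clamped offset plus the UBO's base address to the
+ * TMU's S coordinate in direct mode starts a general memory fetch.
+ */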
+ qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+ offset,
+ qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0]));
+
+ c->num_texture_samples++;
+
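+ /* Switch QPU threads while the TMU fetch is outstanding, to hide
+ * the memory latency.
+ */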
+ ntq_emit_thrsw(c);
+
+ return qir_TEX_RESULT(c);
+}
+
nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
unsigned src)
{
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
unsigned chan = ffs(instr->dest.write_mask) - 1;
struct qreg r = ntq_get_src(c, instr->src[src].src,
instr->src[src].swizzle[chan]);
return qir_MOV(c, result);
}
+static struct qreg
+ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
+{
+ /* Since we're using a Taylor approximation, we want a small number of
+ * coefficients and to take advantage of sin/cos repeating every 2*pi.
+ * We keep x as close to 0 as we can, since the series gets less
+ * accurate as |x| increases. (Also, avoid being clever with sin/cos
+ * identities that shift the input x: getting exact values for x == 0
+ * is very important for SDL rendering.)
+ */
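+ /* Worked example: for x = 0.75 * 2*pi, scaled_x is 0.75 and x_frac
+ * starts as 0.75, which the mapping below turns into -0.25;
+ * sin(2*pi * -0.25) == sin(2*pi * 0.75), so the result is unchanged
+ * while |x_frac| ends up <= 0.5.
+ */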
+ struct qreg scaled_x =
+ qir_FMUL(c, x,
+ qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
+ /* Note: FTOI truncates toward 0. */
+ struct qreg x_frac = qir_FSUB(c, scaled_x,
+ qir_ITOF(c, qir_FTOI(c, scaled_x)));
+ /* Map [0.5, 1] to [-0.5, 0] */
+ qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
+ /* Map [-1, -0.5] to [0, 0.5] */
+ qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
+ qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+ return x_frac;
+}
+
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
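+ /* Taylor coefficients for sin(2*pi*x) =
+ * 2*pi*x - (2*pi*x)^3/3! + (2*pi*x)^5/5! - ...
+ */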
float coeff[] = {
- -2.0 * M_PI,
- pow(2.0 * M_PI, 3) / (3 * 2 * 1),
- -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 2.0 * M_PI,
+ -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
+ pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c,
- src,
- qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
-
- struct qreg x = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
+ struct qreg x = ntq_shrink_sincos_input_range(c, src);
struct qreg x2 = qir_FMUL(c, x, x);
struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
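+ /* Taylor coefficients for cos(2*pi*x) =
+ * 1 - (2*pi*x)^2/2! + (2*pi*x)^4/4! - ...
+ */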
float coeff[] = {
- -1.0f,
- pow(2.0 * M_PI, 2) / (2 * 1),
- -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
- -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
- pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ 1.0f,
+ -pow(2.0 * M_PI, 2) / (2 * 1),
+ pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
+ pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+ -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
};
- struct qreg scaled_x =
- qir_FMUL(c, src,
- qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
- struct qreg x_frac = qir_FADD(c,
- ntq_ffract(c, scaled_x),
- qir_uniform_f(c, -0.5));
-
+ struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
struct qreg sum = qir_uniform_f(c, coeff[0]);
struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
struct qreg x = x2; /* Current x^2, x^4, or x^6 */
if (i != 1)
x = qir_FMUL(c, x, x2);
- struct qreg mul = qir_FMUL(c,
+ sum = qir_FADD(c, qir_FMUL(c,
x,
- qir_uniform_f(c, coeff[i]));
- if (i == 0)
- sum = mul;
- else
- sum = qir_FADD(c, sum, mul);
+ qir_uniform_f(c, coeff[i])),
+ sum);
}
return sum;
}
case nir_op_u2f32:
result = qir_ITOF(c, src[0]);
break;
- case nir_op_b2f:
+ case nir_op_b2f32:
result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
break;
- case nir_op_b2i:
+ case nir_op_b2i32:
result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
break;
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
qir_SF(c, src[0]);
result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
qir_uniform_ui(c, ~0),
/* We have a scalar result, so the instruction should only have a
* single channel written to.
*/
- assert(util_is_power_of_two(instr->dest.write_mask));
+ assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
ntq_store_dest(c, &instr->dest.dest,
ffs(instr->dest.write_mask) - 1, result);
}
ntq_setup_uniforms(struct vc4_compile *c)
{
nir_foreach_variable(var, &c->s->uniforms) {
- uint32_t vec4_count = type_size(var->type);
+ uint32_t vec4_count = uniforms_type_size(var->type);
unsigned vec4_size = 4 * sizeof(float);
declare_uniform_range(c, var->data.driver_location * vec4_size,
}
break;
+ case nir_intrinsic_load_ubo:
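+ /* Only scalarized UBO loads (a single 32-bit word each) are
+ * handled; each becomes one TMU fetch in vc4_ubo_load().
+ */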
+ assert(instr->num_components == 1);
+ ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
+ break;
+
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < instr->num_components; i++) {
ntq_store_dest(c, &instr->dest, i,
.lower_all_io_to_temps = true,
.lower_extract_byte = true,
.lower_extract_word = true,
+ .lower_fdiv = true,
.lower_ffma = true,
.lower_flrp32 = true,
.lower_fpow = true,
if (stage == QSTAGE_FRAG) {
NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
} else {
- NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_clip_vs,
+ c->key->ucp_enables, false);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
*/
s = cso->ir.nir;
- NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
+ NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
+ uniforms_type_size,
(nir_lower_io_options)0);
- } else {
+ } else {
assert(cso->type == PIPE_SHADER_IR_TGSI);
if (vc4_debug & VC4_DEBUG_TGSI) {
s = tgsi_to_nir(cso->tokens, &nir_options);
}
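+ /* Lower the remaining variable modes; uniform variables are handled
+ * separately with their own size callback (see the NIR path above).
+ */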
+ NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform,
+ type_size,
+ (nir_lower_io_options)0);
+
NIR_PASS_V(s, nir_opt_global_to_local);
NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
VC4_DIRTY_RASTERIZER |
VC4_DIRTY_SAMPLE_MASK |
VC4_DIRTY_FRAGTEX |
- VC4_DIRTY_UNCOMPILED_FS))) {
+ VC4_DIRTY_UNCOMPILED_FS |
+ VC4_DIRTY_UBO_1_SIZE))) {
return;
}
PIPE_SPRITE_COORD_UPPER_LEFT);
}
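+ /* The clamp in vc4_ubo_load() bakes the bound UBO's size into the
+ * program, so it has to be part of the compile key.
+ */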
+ key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
key->light_twoside = vc4->rasterizer->base.light_twoside;
struct vc4_compiled_shader *old_fs = vc4->prog.fs;
struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_uncompiled_shader *so = hwcso;
- struct hash_entry *entry;
hash_table_foreach(vc4->fs_cache, entry) {
delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
entry, so);
{
struct vc4_context *vc4 = vc4_context(pctx);
- struct hash_entry *entry;
hash_table_foreach(vc4->fs_cache, entry) {
struct vc4_compiled_shader *shader = entry->data;
vc4_bo_unreference(&shader->bo);