#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
+#include "util/u_helpers.h"
/**
- * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
- * something amenable to the VC4 architecture.
+ * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
+ * intrinsics into something amenable to the VC4 architecture.
*
* Currently, it splits VS inputs and uniforms into scalars, drops any
* non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */
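/* Rough sketch of the uniform lowering (not actual nir_print syntax): a
 * vec4 load such as
 *
 *     vec4 ssa_2 = load_uniform(ssa_1) (base=3)
 *
 * becomes four single-component loads at byte offsets 3*16 + {0, 4, 8, 12},
 * recombined with a vec4 ALU op that the later scalarization pass splits
 * right back apart.
 */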
static void
-replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
- nir_ssa_def **comps)
+replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
+ nir_ssa_def **comps)
{
- /* Batch things back together into a vec4. This will get split by the
- * later ALU scalarization pass.
+ /* Batch things back together into a vector. This will get split by
+ * the later ALU scalarization pass.
*/
- nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);
+ nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
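+        /* nir_vec() emits the vecN ALU op matching num_components (a plain
+         * mov for a single component), so this helper now handles any load
+         * width, not just vec4.
+         */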
/* Replace the old intrinsic with a reference to our reconstructed
- * vec4.
+ * vector.
*/
nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
nir_instr_remove(&intr->instr);
} else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
if (chan->normalized) {
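                        /* SNORM32: 1.0 / 0x7fffffff is 1/(2^31 - 1), so
                         * INT32_MAX maps exactly to 1.0, while INT32_MIN
                         * lands just below -1.0 (no clamp is applied here).
                         */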
return nir_fmul(b,
- nir_i2f(b, vpm_reads[swiz]),
+ nir_i2f32(b, vpm_reads[swiz]),
nir_imm_float(b,
1.0 / 0x7fffffff));
} else {
- return nir_i2f(b, vpm_reads[swiz]);
+ return nir_i2f32(b, vpm_reads[swiz]);
}
} else if (chan->size == 8 &&
(chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
nir_imm_float(b, 1.0));
} else {
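                        /* temp is presumably the raw dword with each byte's
                         * sign bit flipped (an XOR with 0x80808080 earlier in
                         * this function), so each unpacked byte is the signed
                         * value biased by +128; the fadd of -128.0 undoes
                         * that bias.
                         */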
return nir_fadd(b,
- nir_i2f(b,
- vc4_nir_unpack_8i(b, temp,
- swiz)),
+ nir_i2f32(b,
+ vc4_nir_unpack_8i(b, temp,
+ swiz)),
nir_imm_float(b, -128.0));
}
} else {
if (chan->normalized) {
return vc4_nir_unpack_8f(b, vpm, swiz);
} else {
- return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
+ return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
}
}
} else if (chan->size == 16 &&
           (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
            chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
        nir_ssa_def *vpm = vpm_reads[0];

        /* Note that UNPACK_16F eats a half float, not ints, so we use
* UNPACK_16_I for all of these.
*/
if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
- temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
+ temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
if (chan->normalized) {
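                                /* SNORM16 scales by 1/32768 (2^-15): INT16_MIN
                                 * maps exactly to -1.0, while INT16_MAX lands
                                 * just under 1.0.
                                 */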
return nir_fmul(b, temp,
nir_imm_float(b, 1/32768.0f));
                        } else {
return temp;
}
} else {
- temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
+ temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
if (chan->normalized) {
return nir_fmul(b, temp,
nir_imm_float(b, 1 / 65535.0));
enum pipe_format format = c->vs_key->attr_formats[attr];
uint32_t attr_size = util_format_get_blocksize(format);
- /* All TGSI-to-NIR inputs are vec4. */
- assert(intr->num_components == 4);
-
        /* We only accept direct inputs, and TGSI only ever gives them to us
* with an offset value of 0.
*/
- assert(nir_src_as_const_value(intr->src[0]) &&
- nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+ assert(nir_src_as_uint(intr->src[0]) == 0);
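+        /* nir_src_as_uint() itself asserts that the source is a constant,
+         * so the old nir_src_as_const_value() NULL check is folded in.
+         */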
/* Generate dword loads for the VPM values (Since these intrinsics may
* be reordered, the actual reads will be generated at the top of the
 * shader instead of here).
 */
util_format_description(format);
nir_ssa_def *dests[4];
- for (int i = 0; i < 4; i++) {
+ for (int i = 0; i < intr->num_components; i++) {
uint8_t swiz = desc->swizzle[i];
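                /* desc->swizzle[] maps shader channel i to the format's
                 * stored channel, e.g. for BGRA formats shader x reads
                 * stored channel 2.
                 */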
dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
desc);
}
}
- replace_intrinsic_with_vec4(b, intr, dests);
-}
-
-static bool
-is_point_sprite(struct vc4_compile *c, nir_variable *var)
-{
- if (var->data.location < VARYING_SLOT_VAR0 ||
- var->data.location > VARYING_SLOT_VAR31)
- return false;
-
- return (c->fs_key->point_sprite_mask &
- (1 << (var->data.location - VARYING_SLOT_VAR0)));
+ replace_intrinsic_with_vec(b, intr, dests);
}
static void
return;
}
- nir_variable *input_var = NULL;
- nir_foreach_variable(var, &c->s->inputs) {
- if (var->data.driver_location == nir_intrinsic_base(intr)) {
- input_var = var;
- break;
- }
- }
+ nir_variable *input_var =
+ nir_find_variable_with_driver_location(c->s, nir_var_shader_in,
+ nir_intrinsic_base(intr));
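+        /* nir_find_variable_with_driver_location() is the core-NIR helper
+         * that replaces the open-coded nir_foreach_variable search.
+         */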
assert(input_var);
int comp = nir_intrinsic_component(intr);
/* Lower away point coordinates, and fix up PNTC. */
- if (is_point_sprite(c, input_var) ||
- input_var->data.location == VARYING_SLOT_PNTC) {
+ if (util_varying_is_point_coord(input_var->data.location,
+ c->fs_key->point_sprite_mask)) {
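+                /* util_varying_is_point_coord() (from util/u_helpers.h) folds
+                 * the old is_point_sprite() check and the explicit
+                 * VARYING_SLOT_PNTC test into a single query against the
+                 * point-sprite mask.
+                 */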
assert(intr->num_components == 1);
nir_ssa_def *result = &intr->dest.ssa;
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
nir_intrinsic_instr *intr)
{
- nir_variable *output_var = NULL;
- nir_foreach_variable(var, &c->s->outputs) {
- if (var->data.driver_location == nir_intrinsic_base(intr)) {
- output_var = var;
- break;
- }
- }
+ nir_variable *output_var =
+ nir_find_variable_with_driver_location(c->s, nir_var_shader_out,
+ nir_intrinsic_base(intr));
assert(output_var);
if (c->stage == QSTAGE_COORD &&
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
nir_intrinsic_instr *intr)
{
- /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
- * in the backend.
- */
- if (intr->num_components == 1)
- return;
- assert(intr->num_components == 4);
-
b->cursor = nir_before_instr(&intr->instr);
- /* Generate scalar loads equivalent to the original VEC4. */
+ /* Generate scalar loads equivalent to the original vector. */
nir_ssa_def *dests[4];
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *intr_comp =
nir_intrinsic_instr_create(c->s, intr->intrinsic);
intr_comp->num_components = 1;
- nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
+ intr->dest.ssa.bit_size, NULL);
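+                /* Propagating intr->dest.ssa.bit_size rather than hardcoding
+                 * 32 presumably keeps any non-32-bit uniform load intact
+                 * through the split.
+                 */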
- /* Convert the uniform offset to bytes. If it happens to be a
- * constant, constant-folding will clean up the shift for us.
+ /* Convert the uniform offset to bytes. If it happens
+ * to be a constant, constant-folding will clean up
+ * the shift for us.
*/
nir_intrinsic_set_base(intr_comp,
- nir_intrinsic_base(intr) * 16 + i * 4);
+ nir_intrinsic_base(intr) * 16 +
+ i * 4);
+ nir_intrinsic_set_range(intr_comp,
+ nir_intrinsic_range(intr) * 16 - i * 4);
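+                /* base and range move in opposite directions so the end of
+                 * the accessed region stays fixed for every component:
+                 * (base*16 + i*4) + (range*16 - i*4) == (base + range) * 16.
+                 */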
intr_comp->src[0] =
nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                         nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;
nir_builder_instr_insert(b, &intr_comp->instr);
}
- replace_intrinsic_with_vec4(b, intr, dests);
+ replace_intrinsic_with_vec(b, intr, dests);
}
static void