*/
#include "vc4_qir.h"
-#include "glsl/nir/nir_builder.h"
+#include "compiler/nir/nir_builder.h"
#include "util/u_format.h"
/**
&desc->channel[swiz];
nir_ssa_def *temp;
- if (swiz > UTIL_FORMAT_SWIZZLE_W) {
+ if (swiz > PIPE_SWIZZLE_W) {
return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
} else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
/* All TGSI-to-NIR inputs are vec4. */
assert(intr->num_components == 4);
+ /* We only accept direct inputs and TGSI only ever gives them to us
+ * with an offset value of 0.
+ */
+ assert(nir_src_as_const_value(intr->src[0]) &&
+ nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
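/* Illustrative sketch (not part of the patch): in this era of NIR,
* nir_src_as_const_value() returns a nir_const_value pointer when the
* source comes from a load_const, so a "direct" access is recognized as a
* constant-zero offset. Variable names here are hypothetical:
*
*     nir_const_value *off = nir_src_as_const_value(intr->src[0]);
*     bool is_direct = off && off->u32[0] == 0;
*/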
/* Generate dword loads for the VPM values (since these intrinsics may
* be reordered, the actual reads will be generated at the top of the
* shader by ntq_setup_inputs()).
nir_intrinsic_load_input);
intr_comp->num_components = 1;
intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
- nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+ intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+ nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
nir_builder_instr_insert(b, &intr_comp->instr);
vpm_reads[i] = &intr_comp->dest.ssa;
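/* Worked example, following the base arithmetic above: an attribute at
* vec4 base 2 becomes four scalar load_input intrinsics at dword bases
* 2*4+0 through 2*4+3 (8..11), each carrying the constant-zero offset
* source added by this patch.
*/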
/* All TGSI-to-NIR inputs are vec4. */
assert(intr->num_components == 4);
+ /* We only accept direct inputs and TGSI only ever gives them to us
+ * with an offset value of 0.
+ */
+ assert(nir_src_as_const_value(intr->src[0]) &&
+ nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
/* Generate scalar loads equivalent to the original VEC4. */
nir_ssa_def *dests[4];
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
intr_comp->num_components = 1;
intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
- nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+ intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+
+ nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
nir_builder_instr_insert(b, &intr_comp->instr);
dests[i] = &intr_comp->dest.ssa;
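/* A minimal sketch of the usual NIR replacement step (the helper the
* pass actually uses is elided from this hunk): the collected scalars
* would be rebuilt into a vec4 and swapped in for the original
* destination, roughly:
*
*     nir_ssa_def *vec = nir_vec4(b, dests[0], dests[1],
*                                 dests[2], dests[3]);
*     nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
*/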
/* Color output is lowered by vc4_nir_lower_blend(). */
if (c->stage == QSTAGE_FRAG &&
(output_var->data.location == FRAG_RESULT_COLOR ||
- output_var->data.location == FRAG_RESULT_DATA0)) {
+ output_var->data.location == FRAG_RESULT_DATA0 ||
+ output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
intr->const_index[0] *= 4;
return;
}
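/* Worked example: since the rest of the pass numbers outputs per scalar
* slot, a color output left as a vec4 write here still has its base
* scaled, so vec4 slot 1 becomes base 1 * 4 = 4 and the two numbering
* schemes stay consistent.
*/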
/* All TGSI-to-NIR outputs are VEC4. */
assert(intr->num_components == 4);
+ /* We only accept direct outputs and TGSI only ever gives them to us
+ * with an offset value of 0.
+ */
+ assert(nir_src_as_const_value(intr->src[1]) &&
+ nir_src_as_const_value(intr->src[1])->u32[0] == 0);
+
b->cursor = nir_before_instr(&intr->instr);
for (unsigned i = 0; i < intr->num_components; i++) {
assert(intr->src[0].is_ssa);
intr_comp->src[0] =
nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
+ intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
nir_builder_instr_insert(b, &intr_comp->instr);
}
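/* Worked example, assuming the same base arithmetic as the input paths
* above (the const_index line is elided from this hunk): a vec4
* store_output at base N becomes four scalar stores at bases N*4+0
* through N*4+3, each taking one channel of the value via nir_channel()
* in src[0] plus the constant-zero offset in src[1].
*/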
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
nir_intrinsic_instr *intr)
{
- /* All TGSI-to-NIR uniform loads are vec4, but we may create dword
- * loads in our lowering passes.
+ /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
+ * in the backend.
*/
if (intr->num_components == 1)
return;
nir_intrinsic_instr *intr_comp =
nir_intrinsic_instr_create(c->s, intr->intrinsic);
intr_comp->num_components = 1;
- nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
-
- if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
- /* Convert the variable TGSI register index to a byte
- * offset.
- */
- intr_comp->src[0] =
- nir_src_for_ssa(nir_ishl(b,
- intr->src[0].ssa,
- nir_imm_int(b, 4)));
-
- /* Convert the offset to be a byte index, too. */
- intr_comp->const_index[0] = (intr->const_index[0] * 16 +
- i * 4);
- } else {
- /* We want a dword index for non-indirect uniform
- * loads.
- */
- intr_comp->const_index[0] = (intr->const_index[0] * 4 +
- i);
- }
+ nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
+
+ /* Convert the uniform offset to bytes. If it happens to be a
+ * constant, constant-folding will clean up the shift for us.
+ */
+ intr_comp->const_index[0] = (intr->const_index[0] * 16 + i * 4);
+
+ intr_comp->src[0] =
+ nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
+ nir_imm_int(b, 4)));
dests[i] = &intr_comp->dest.ssa;
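/* Worked example: a load of TGSI uniform register 2, component 3, gets
* const_index[0] = 2 * 16 + 3 * 4 = 44 bytes, and a variable index is
* scaled the same way, since i << 4 == i * 16 (one vec4 register is 16
* bytes).
*/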
break;
case nir_intrinsic_load_uniform:
- case nir_intrinsic_load_uniform_indirect:
- case nir_intrinsic_load_user_clip_plane:
vc4_nir_lower_uniform(c, b, intr);
break;
+ case nir_intrinsic_load_user_clip_plane:
default:
break;
}
}
static bool
-vc4_nir_lower_io_block(nir_block *block, void *arg)
+vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
- struct vc4_compile *c = arg;
- nir_function_impl *impl =
- nir_cf_node_get_function(&block->cf_node);
-
nir_builder b;
nir_builder_init(&b, impl);
- nir_foreach_instr_safe(block, instr)
- vc4_nir_lower_io_instr(c, &b, instr);
-
- return true;
-}
-
-static bool
-vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
-{
- nir_foreach_block(impl, vc4_nir_lower_io_block, c);
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr_safe(instr, block)
+ vc4_nir_lower_io_instr(c, &b, instr);
+ }
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
return true;
}
void
-vc4_nir_lower_io(struct vc4_compile *c)
+vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
- nir_foreach_overload(c->s, overload) {
- if (overload->impl)
- vc4_nir_lower_io_impl(c, overload->impl);
+ nir_foreach_function(function, s) {
+ if (function->impl)
+ vc4_nir_lower_io_impl(c, function->impl);
}
}
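/* Illustrative call site, assuming the driver's existing compile flow:
* callers that previously did vc4_nir_lower_io(c) now pass the shader
* explicitly, matching the shader-first convention of NIR passes:
*
*     vc4_nir_lower_io(c->s, c);
*/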