#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
+#include "glsl/nir/nir.h"
+#include "glsl/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
-
#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
        return qir_TEX_RESULT(c);
}
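+/* Emits a single-component load_uniform whose const_index selects a
+ * QUNIFORM_* state-dependent value.  The contents enum is encoded by adding
+ * VC4_NIR_STATE_UNIFORM_OFFSET, so the nir_intrinsic_load_uniform handling
+ * below can tell it apart from a normal user uniform load and turn it back
+ * into qir_uniform(c, contents, 0).
+ */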
+nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
+                                       enum quniform_contents contents)
+{
+        nir_intrinsic_instr *intr =
+                nir_intrinsic_instr_create(b->shader,
+                                           nir_intrinsic_load_uniform);
+        intr->const_index[0] = VC4_NIR_STATE_UNIFORM_OFFSET + contents;
+        intr->num_components = 1;
+        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
+        nir_builder_instr_insert(b, &intr->instr);
+        return &intr->dest.ssa;
+}
+
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
-               *dest = qir_uniform(c, QUNIFORM_UNIFORM, instr->const_index[0]);
+               /* const_index values at or above VC4_NIR_STATE_UNIFORM_OFFSET
+                * encode a QUNIFORM_* contents enum instead of a user uniform
+                * index, so decode it back for qir_uniform().
+                */
+               if (instr->const_index[0] < VC4_NIR_STATE_UNIFORM_OFFSET) {
+                       *dest = qir_uniform(c, QUNIFORM_UNIFORM,
+                                           instr->const_index[0]);
+               } else {
+                       *dest = qir_uniform(c, instr->const_index[0] -
+                                           VC4_NIR_STATE_UNIFORM_OFFSET,
+                                           0);
+               }
                break;
        case nir_intrinsic_load_uniform_indirect:
        uint32_t variant_id;
};
+/* Special offset added to a nir_intrinsic_load_uniform const_index[0] to
+ * request a QUNIFORM_* state-dependent value instead of a user uniform.
+ */
+#define VC4_NIR_STATE_UNIFORM_OFFSET 2000000000
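+/* Illustrative use (hypothetical caller; QUNIFORM_ALPHA_REF is just one
+ * example member of enum quniform_contents): given a nir_builder *b, a
+ * lowering pass could do
+ *
+ *     nir_ssa_def *ref = vc4_nir_get_state_uniform(b, QUNIFORM_ALPHA_REF);
+ *
+ * and the nir_intrinsic_load_uniform handling in the QIR backend will later
+ * decode it back into qir_uniform(c, QUNIFORM_ALPHA_REF, 0).
+ */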
+
struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qinst *qir_inst(enum qop op, struct qreg dst,
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm_writes(struct vc4_compile *c);
void vc4_nir_lower_io(struct vc4_compile *c);
+nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
+                                       enum quniform_contents contents);
void qir_lower_uniforms(struct vc4_compile *c);
void qpu_schedule_instructions(struct vc4_compile *c);