gl_shader_stage stage,
const nir_shader_compiler_options *options)
{
- build->shader = nir_shader_create(mem_ctx, stage, options);
+ build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
nir_function *func = nir_function_create(build->shader, "main");
build->exact = false;
build->impl = nir_function_impl_create(func);
build->cursor = nir_after_instr(instr);
}
+static inline nir_instr *
+nir_builder_last_instr(nir_builder *build)
+{
+ assert(build->cursor.option == nir_cursor_after_instr);
+ return build->cursor.instr;
+}
+
/* Insert the control-flow node \p cf at the builder's current cursor
 * position (thin wrapper around nir_cf_node_insert()).
 */
static inline void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}
+static inline bool
+nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
+{
+ nir_block *block = nir_cursor_current_block(build->cursor);
+ for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
+ if (n == cf_node)
+ return true;
+ }
+ return false;
+}
+
+static inline nir_if *
+nir_push_if(nir_builder *build, nir_ssa_def *condition)
+{
+ nir_if *nif = nir_if_create(build->shader);
+ nif->condition = nir_src_for_ssa(condition);
+ nir_builder_cf_insert(build, &nif->cf_node);
+ build->cursor = nir_before_cf_list(&nif->then_list);
+ return nif;
+}
+
+static inline nir_if *
+nir_push_else(nir_builder *build, nir_if *nif)
+{
+ if (nif) {
+ assert(nir_builder_is_inside_cf(build, &nif->cf_node));
+ } else {
+ nir_block *block = nir_cursor_current_block(build->cursor);
+ nif = nir_cf_node_as_if(block->cf_node.parent);
+ }
+ build->cursor = nir_before_cf_list(&nif->else_list);
+ return nif;
+}
+
+static inline void
+nir_pop_if(nir_builder *build, nir_if *nif)
+{
+ if (nif) {
+ assert(nir_builder_is_inside_cf(build, &nif->cf_node));
+ } else {
+ nir_block *block = nir_cursor_current_block(build->cursor);
+ nif = nir_cf_node_as_if(block->cf_node.parent);
+ }
+ build->cursor = nir_after_cf_node(&nif->cf_node);
+}
+
+static inline nir_ssa_def *
+nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
+{
+ nir_block *block = nir_cursor_current_block(build->cursor);
+ nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));
+
+ nir_phi_instr *phi = nir_phi_instr_create(build->shader);
+
+ nir_phi_src *src = ralloc(phi, nir_phi_src);
+ src->pred = nir_if_last_then_block(nif);
+ src->src = nir_src_for_ssa(then_def);
+ exec_list_push_tail(&phi->srcs, &src->node);
+
+ src = ralloc(phi, nir_phi_src);
+ src->pred = nir_if_last_else_block(nif);
+ src->src = nir_src_for_ssa(else_def);
+ exec_list_push_tail(&phi->srcs, &src->node);
+
+ assert(then_def->num_components == else_def->num_components);
+ assert(then_def->bit_size == else_def->bit_size);
+ nir_ssa_dest_init(&phi->instr, &phi->dest,
+ then_def->num_components, then_def->bit_size, NULL);
+
+ nir_builder_instr_insert(build, &phi->instr);
+
+ return &phi->dest.ssa;
+}
+
+static inline nir_loop *
+nir_push_loop(nir_builder *build)
+{
+ nir_loop *loop = nir_loop_create(build->shader);
+ nir_builder_cf_insert(build, &loop->cf_node);
+ build->cursor = nir_before_cf_list(&loop->body);
+ return loop;
+}
+
+static inline void
+nir_pop_loop(nir_builder *build, nir_loop *loop)
+{
+ if (loop) {
+ assert(nir_builder_is_inside_cf(build, &loop->cf_node));
+ } else {
+ nir_block *block = nir_cursor_current_block(build->cursor);
+ loop = nir_cf_node_as_loop(block->cf_node.parent);
+ }
+ build->cursor = nir_after_cf_node(&loop->cf_node);
+}
+
static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
}
static inline nir_ssa_def *
-nir_build_imm(nir_builder *build, unsigned num_components, nir_const_value value)
+nir_build_imm(nir_builder *build, unsigned num_components,
+ unsigned bit_size, nir_const_value value)
{
nir_load_const_instr *load_const =
- nir_load_const_instr_create(build->shader, num_components, 32);
+ nir_load_const_instr_create(build->shader, num_components, bit_size);
if (!load_const)
return NULL;
memset(&v, 0, sizeof(v));
v.f32[0] = x;
- return nir_build_imm(build, 1, v);
+ return nir_build_imm(build, 1, 32, v);
+}
+
+static inline nir_ssa_def *
+nir_imm_double(nir_builder *build, double x)
+{
+ nir_const_value v;
+
+ memset(&v, 0, sizeof(v));
+ v.f64[0] = x;
+
+ return nir_build_imm(build, 1, 64, v);
}
static inline nir_ssa_def *
v.f32[2] = z;
v.f32[3] = w;
- return nir_build_imm(build, 4, v);
+ return nir_build_imm(build, 4, 32, v);
}
static inline nir_ssa_def *
memset(&v, 0, sizeof(v));
v.i32[0] = x;
- return nir_build_imm(build, 1, v);
+ return nir_build_imm(build, 1, 32, v);
+}
+
+static inline nir_ssa_def *
+nir_imm_int64(nir_builder *build, int64_t x)
+{
+ nir_const_value v;
+
+ memset(&v, 0, sizeof(v));
+ v.i64[0] = x;
+
+ return nir_build_imm(build, 1, 64, v);
}
static inline nir_ssa_def *
v.i32[2] = z;
v.i32[3] = w;
- return nir_build_imm(build, 4, v);
+ return nir_build_imm(build, 4, 32, v);
}
static inline nir_ssa_def *
}
}
+ /* When in doubt, assume 32. */
+ if (bit_size == 0)
+ bit_size = 32;
+
/* Make sure we don't swizzle from outside of our source vector (like if a
* scalar value was passed into a multiply with a vector).
*/
return &instr->dest.dest.ssa;
}
-#define ALU1(op) \
-static inline nir_ssa_def * \
-nir_##op(nir_builder *build, nir_ssa_def *src0) \
-{ \
- return nir_build_alu(build, nir_op_##op, src0, NULL, NULL, NULL); \
-}
-
-#define ALU2(op) \
-static inline nir_ssa_def * \
-nir_##op(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) \
-{ \
- return nir_build_alu(build, nir_op_##op, src0, src1, NULL, NULL); \
-}
-
-#define ALU3(op) \
-static inline nir_ssa_def * \
-nir_##op(nir_builder *build, nir_ssa_def *src0, \
- nir_ssa_def *src1, nir_ssa_def *src2) \
-{ \
- return nir_build_alu(build, nir_op_##op, src0, src1, src2, NULL); \
-}
-
-#define ALU4(op) \
-static inline nir_ssa_def * \
-nir_##op(nir_builder *build, nir_ssa_def *src0, \
- nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3) \
-{ \
- return nir_build_alu(build, nir_op_##op, src0, src1, src2, src3); \
-}
-
#include "nir_builder_opcodes.h"
static inline nir_ssa_def *
return NULL;
}
+static inline nir_ssa_def *
+nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+{
+ switch (src0->num_components) {
+ case 1: return nir_ine(b, src0, src1);
+ case 2: return nir_bany_inequal2(b, src0, src1);
+ case 3: return nir_bany_inequal3(b, src0, src1);
+ case 4: return nir_bany_inequal4(b, src0, src1);
+ default:
+ unreachable("bad component size");
+ }
+}
+
+static inline nir_ssa_def *
+nir_bany(nir_builder *b, nir_ssa_def *src)
+{
+ return nir_bany_inequal(b, src, nir_imm_int(b, 0));
+}
+
static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
return nir_swizzle(b, def, swizzle, 1, false);
}
+static inline nir_ssa_def *
+nir_channels(nir_builder *b, nir_ssa_def *def, unsigned mask)
+{
+ unsigned num_channels = 0, swizzle[4] = { 0, 0, 0, 0 };
+
+ for (unsigned i = 0; i < 4; i++) {
+ if ((mask & (1 << i)) == 0)
+ continue;
+ swizzle[num_channels++] = i;
+ }
+
+ return nir_swizzle(b, def, swizzle, num_channels, false);
+}
+
/**
* Turns a nir_src into a nir_ssa_def * so it can be passed to
* nir_build_alu()-based builder calls.
}
/**
- * Similar to nir_ssa_for_src(), but for alu src's, respecting the
+ * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
* nir_alu_src's swizzle.
*/
static inline nir_ssa_def *
load->num_components = num_components;
load->variables[0] = nir_deref_var_create(load, var);
nir_ssa_dest_init(&load->instr, &load->dest, num_components,
- glsl_get_bit_size(glsl_get_base_type(var->type)), NULL);
+ glsl_get_bit_size(var->type), NULL);
+ nir_builder_instr_insert(build, &load->instr);
+ return &load->dest.ssa;
+}
+
+static inline nir_ssa_def *
+nir_load_deref_var(nir_builder *build, nir_deref_var *deref)
+{
+ const struct glsl_type *type = nir_deref_tail(&deref->deref)->type;
+ const unsigned num_components = glsl_get_vector_elements(type);
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_var);
+ load->num_components = num_components;
+ load->variables[0] = nir_deref_var_clone(deref, load);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components,
+ glsl_get_bit_size(type), NULL);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
store->num_components = num_components;
store->const_index[0] = writemask & ((1 << num_components) - 1);
- store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &deref->deref));
+ store->variables[0] = nir_deref_var_clone(deref, store);
store->src[0] = nir_src_for_ssa(value);
nir_builder_instr_insert(build, &store->instr);
}
nir_intrinsic_instr *copy =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
- copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
- copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));
+ copy->variables[0] = nir_deref_var_clone(dest, copy);
+ copy->variables[1] = nir_deref_var_clone(src, copy);
nir_builder_instr_insert(build, ©->instr);
}
nir_builder_instr_insert(build, ©->instr);
}
+/* Generic builder for system values. */
static inline nir_ssa_def *
nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index)
{
return &load->dest.ssa;
}
+/* Generate custom builders for system values. */
+#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
+ num_variables, num_indices, idx0, idx1, idx2, flags)
+#define LAST_INTRINSIC(name)
+
+#define DEFINE_SYSTEM_VALUE(name) \
+ static inline nir_ssa_def * \
+ nir_load_##name(nir_builder *build) \
+ { \
+ return nir_load_system_value(build, nir_intrinsic_load_##name, 0); \
+ } \
+
+#include "nir_intrinsics.h"
+
+static inline nir_ssa_def *
+nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
+ unsigned interp_mode)
+{
+ nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
+ nir_ssa_dest_init(&bary->instr, &bary->dest, 2, 32, NULL);
+ nir_intrinsic_set_interp_mode(bary, interp_mode);
+ nir_builder_instr_insert(build, &bary->instr);
+ return &bary->dest.ssa;
+}
+
static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{