+ return nir_swizzle(b, def, swizzle, num_channels);
+}
+
+/* Recursive helper for nir_vector_extract(): selects the channel of `vec`
+ * indexed by the dynamic SSA value `c`, searching the half-open channel
+ * range [start, end).
+ *
+ * Binary search over the channels: compare `c` against the midpoint with
+ * nir_ilt() and recurse into one half via nir_bcsel().  A one-channel
+ * range bottoms out in a plain nir_channel(), so the emitted bcsel tree
+ * has depth O(log2(end - start)).
+ */
+static inline nir_ssa_def *
+_nir_vector_extract_helper(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c,
+ unsigned start, unsigned end)
+{
+ if (start == end - 1) {
+ /* Only one candidate channel left: extract it directly. */
+ return nir_channel(b, vec, start);
+ } else {
+ unsigned mid = start + (end - start) / 2;
+ /* The midpoint constant is built at c's bit size so the ilt
+ * comparison operands match.
+ */
+ return nir_bcsel(b, nir_ilt(b, c, nir_imm_intN_t(b, mid, c->bit_size)),
+ _nir_vector_extract_helper(b, vec, c, start, mid),
+ _nir_vector_extract_helper(b, vec, c, mid, end));
+ }
+}
+
+/** Extracts the channel of `vec` selected by the index `c`.
+ *
+ * If `c` is constant, this folds to a plain nir_channel(); a constant
+ * index that is out of range yields a 1-component undef of `vec`'s bit
+ * size.  For a dynamic `c`, a binary-search bcsel tree is emitted via
+ * _nir_vector_extract_helper() over all of `vec`'s channels.
+ */
+static inline nir_ssa_def *
+nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
+{
+ nir_src c_src = nir_src_for_ssa(c);
+ if (nir_src_is_const(c_src)) {
+ uint64_t c_const = nir_src_as_uint(c_src);
+ if (c_const < vec->num_components)
+ return nir_channel(b, vec, c_const);
+ else
+ /* Out-of-bounds constant index: result is undefined. */
+ return nir_ssa_undef(b, 1, vec->bit_size);
+ } else {
+ return _nir_vector_extract_helper(b, vec, c, 0, vec->num_components);
+ }
+}
+
+/** Replaces the component of `vec` specified by `c` with `scalar`
+ *
+ * `c` must be a valid constant channel index (asserted) and `scalar`
+ * must be a single component (asserted).  Returns a new vector built
+ * with a vecN ALU instruction whose sources pass every original channel
+ * of `vec` through unchanged, except channel `c`, which reads from
+ * `scalar` instead.
+ */
+static inline nir_ssa_def *
+nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
+ nir_ssa_def *scalar, unsigned c)
+{
+ assert(scalar->num_components == 1);
+ assert(c < vec->num_components);
+
+ nir_op vec_op = nir_op_vec(vec->num_components);
+ nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);
+
+ for (unsigned i = 0; i < vec->num_components; i++) {
+ if (i == c) {
+ /* The inserted channel sources the (single-component) scalar. */
+ vec_instr->src[i].src = nir_src_for_ssa(scalar);
+ vec_instr->src[i].swizzle[0] = 0;
+ } else {
+ /* All other channels pass through from the original vector. */
+ vec_instr->src[i].src = nir_src_for_ssa(vec);
+ vec_instr->src[i].swizzle[0] = i;
+ }
+ }
+
+ return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
+}
+
+/** Replaces the component of `vec` specified by `c` with `scalar`
+ *
+ * Unlike nir_vector_insert_imm(), the channel index `c` may be a dynamic
+ * SSA value.  When `c` folds to a constant this lowers to
+ * nir_vector_insert_imm(), or returns `vec` unchanged if the constant is
+ * out of range.  Otherwise the insert is done with a per-channel
+ * compare-and-select: build an immediate vector of channel indices,
+ * compare `c` against it, and bcsel between `scalar` and `vec`.
+ */
+static inline nir_ssa_def *
+nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
+ nir_ssa_def *c)
+{
+ assert(scalar->num_components == 1);
+ assert(c->num_components == 1);
+
+ nir_src c_src = nir_src_for_ssa(c);
+ if (nir_src_is_const(c_src)) {
+ uint64_t c_const = nir_src_as_uint(c_src);
+ if (c_const < vec->num_components)
+ return nir_vector_insert_imm(b, vec, scalar, c_const);
+ else
+ /* Out-of-bounds constant index: the insert is a no-op. */
+ return vec;
+ } else {
+ /* Immediate vector {0, 1, 2, ...} at c's bit size, one entry per
+ * channel of vec, to compare the dynamic index against.
+ */
+ nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
+ per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
+ nir_ssa_def *per_comp_idx =
+ nir_build_imm(b, vec->num_components,
+ c->bit_size, per_comp_idx_const);
+
+ /* nir_builder will automatically splat out scalars to vectors so an
+ * insert is as simple as "if I'm the channel, replace me with the
+ * scalar."
+ */
+ return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
+ }
+}
+
+/* Converts the integer value `x` to dest_bit_size with an i2iN
+ * instruction, dispatching on the destination size (64/32/16/8 bits).
+ * Returns `x` unchanged if it is already that size; any other size is
+ * unreachable.
+ */
+static inline nir_ssa_def *
+nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
+{
+ if (x->bit_size == dest_bit_size)
+ return x;
+
+ switch (dest_bit_size) {
+ case 64: return nir_i2i64(build, x);
+ case 32: return nir_i2i32(build, x);
+ case 16: return nir_i2i16(build, x);
+ case 8: return nir_i2i8(build, x);
+ default: unreachable("Invalid bit size");
+ }
+}
+
+static inline nir_ssa_def *
+nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
+{
+ if (x->bit_size == dest_bit_size)
+ return x;
+
+ switch (dest_bit_size) {
+ case 64: return nir_u2u64(build, x);
+ case 32: return nir_u2u32(build, x);
+ case 16: return nir_u2u16(build, x);
+ case 8: return nir_u2u8(build, x);
+ default: unreachable("Invalid bit size");
+ }