bool lower_vote_trivial:1;
bool lower_subgroup_masks:1;
bool lower_shuffle:1;
+ bool lower_quad:1;
} nir_lower_subgroups_options;
bool nir_lower_subgroups(nir_shader *shader,
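With the new flag in place, a driver that wants quad operations rewritten as
shuffles sets it when invoking the pass. A minimal sketch of the call site
(assuming the truncated declaration above takes a const pointer to the options
struct; the remaining option fields are omitted and left zero-initialized):

    nir_lower_subgroups_options opts = {
       .lower_quad = true,        /* rewrite quad ops via lower_shuffle() */
       .lower_to_scalar = true,   /* split vector ops into scalar ones */
    };
    bool progress = nir_lower_subgroups(shader, &opts);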
INTRINSIC(shuffle_down, 2, ARR(0, 1), true, 0, 0,
0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+/** Quad operations from SPIR-V. */
+INTRINSIC(quad_broadcast, 2, ARR(0, 1), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(quad_swap_horizontal, 1, ARR(0), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(quad_swap_vertical, 1, ARR(0), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(quad_swap_diagonal, 1, ARR(0), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+
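Per the INTRINSIC() signature used throughout this file, quad_broadcast takes
two sources: the value (the 0 in ARR(0, 1) means its component count matches
the destination's) and a single-component quad lane index; the three swaps
take only the value. A rough nir_builder sketch of emitting quad_broadcast
(the helper name is hypothetical, for illustration only):

    /* Hypothetical helper: broadcast `value` from quad lane `lane` (0-3)
     * to all four invocations of the quad.
     */
    static nir_ssa_def *
    build_quad_broadcast(nir_builder *b, nir_ssa_def *value, unsigned lane)
    {
       nir_intrinsic_instr *intrin =
          nir_intrinsic_instr_create(b->shader, nir_intrinsic_quad_broadcast);
       intrin->num_components = value->num_components;
       intrin->src[0] = nir_src_for_ssa(value);
       intrin->src[1] = nir_src_for_ssa(nir_imm_int(b, lane));
       nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                         value->num_components, value->bit_size, NULL);
       nir_builder_instr_insert(b, &intrin->instr);
       return &intrin->dest.ssa;
    }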
/**
* Basic Geometry Shader intrinsics.
*
assert(intrin->src[1].is_ssa);
index = nir_iadd(b, index, intrin->src[1].ssa);
break;
+ case nir_intrinsic_quad_broadcast:
+ assert(intrin->src[1].is_ssa);
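+ /* Keep the quad base (invocation & ~0x3) and substitute the
+ * requested lane for the low two bits.
+ */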
+ index = nir_ior(b, nir_iand(b, index, nir_imm_int(b, ~0x3)),
+ intrin->src[1].ssa);
+ break;
+ case nir_intrinsic_quad_swap_horizontal:
+ /* For quad operations, subgroups are divided into quads where
+ * (invocation % 4) is the index into a 2x2 square arranged as follows:
+ *
+ * +---+---+
+ * | 0 | 1 |
+ * +---+---+
+ * | 2 | 3 |
+ * +---+---+
+ */
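+ /* XOR-ing the low bit swaps horizontal neighbours: 0<->1, 2<->3. */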
+ index = nir_ixor(b, index, nir_imm_int(b, 0x1));
+ break;
+ case nir_intrinsic_quad_swap_vertical:
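+ /* Flipping bit 1 swaps rows: 0<->2, 1<->3. */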
+ index = nir_ixor(b, index, nir_imm_int(b, 0x2));
+ break;
+ case nir_intrinsic_quad_swap_diagonal:
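+ /* Flipping both bits swaps along the diagonal: 0<->3, 1<->2. */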
+ index = nir_ixor(b, index, nir_imm_int(b, 0x3));
+ break;
default:
unreachable("Invalid intrinsic");
}
return lower_subgroup_op_to_scalar(b, intrin);
break;
+ case nir_intrinsic_quad_broadcast:
+ case nir_intrinsic_quad_swap_horizontal:
+ case nir_intrinsic_quad_swap_vertical:
+ case nir_intrinsic_quad_swap_diagonal:
+ if (options->lower_quad)
+ return lower_shuffle(b, intrin, options->lower_to_scalar);
+ else if (options->lower_to_scalar && intrin->num_components > 1)
+ return lower_subgroup_op_to_scalar(b, intrin);
+ break;
+
default:
break;
}
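The index arithmetic scattered across the cases above can be summarized in one
place. The following stand-alone helper is a hypothetical reference model, not
part of the patch: it returns the subgroup invocation each quad op reads from,
assuming the 2x2 layout described in the comment in lower_shuffle().

    /* Reference model of the quad index math, for illustration only. */
    static unsigned
    quad_source_invocation(unsigned invocation, nir_intrinsic_op op,
                           unsigned broadcast_lane)
    {
       switch (op) {
       case nir_intrinsic_quad_broadcast:
          /* Keep the quad base, replace the lane within the quad. */
          return (invocation & ~0x3) | broadcast_lane;
       case nir_intrinsic_quad_swap_horizontal:
          return invocation ^ 0x1;   /* 0<->1, 2<->3 */
       case nir_intrinsic_quad_swap_vertical:
          return invocation ^ 0x2;   /* 0<->2, 1<->3 */
       case nir_intrinsic_quad_swap_diagonal:
          return invocation ^ 0x3;   /* 0<->3, 1<->2 */
       default:
          unreachable("not a quad intrinsic");
       }
    }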