#define ARR(...) { __VA_ARGS__ }
-INTRINSIC(load_var_vec1, 0, ARR(), true, 1, 1, 0,
- NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(load_var_vec2, 0, ARR(), true, 2, 1, 0,
- NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(load_var_vec3, 0, ARR(), true, 3, 1, 0,
- NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(load_var_vec4, 0, ARR(), true, 4, 1, 0,
- NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(store_var_vec1, 1, ARR(1), false, 0, 1, 0, 0)
-INTRINSIC(store_var_vec2, 1, ARR(2), false, 0, 1, 0, 0)
-INTRINSIC(store_var_vec3, 1, ARR(3), false, 0, 1, 0, 0)
-INTRINSIC(store_var_vec4, 1, ARR(4), false, 0, 1, 0, 0)
-INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)
+INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 0, 0)
+INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)
+
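(Background sketch, not part of this patch: this header is an X-macro file.
A consumer such as nir.h is assumed to define INTRINSIC() before including
it, roughly as follows; the real definitions may differ in detail.

#define INTRINSIC(name, num_srcs, src_components, has_dest, \
                  dest_components, num_variables, num_indices, flags) \
   nir_intrinsic_##name,
#define LAST_INTRINSIC(name) nir_intrinsic_last = nir_intrinsic_##name,

typedef enum {
#include "nir_intrinsics.h"
   nir_intrinsic_num_intrinsics = nir_intrinsic_last + 1
} nir_intrinsic_op;

Reading the entries above against that parameter list: load_var has no
sources, a destination whose width comes from the instruction, one variable,
and no constant indices; store_var has one source of matching width.)
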
+/*
+ * Interpolation of input. The interp_var_at* intrinsics are similar to the
+ * load_var intrinsic acting on a shader input except that they interpolate
+ * the input differently. The at_sample and at_offset intrinsics take an
+ * additional source that is an integer sample id or a vec2 position offset,
+ * respectively.
+ */
+
+INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
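+
A hypothetical emission sketch for the new intrinsics follows (the builder
helpers and exact signatures are assumptions based on the NIR API of this
period, not something this patch defines):

/* Emit interp_var_at_offset for GLSL's interpolateAtOffset(input, offset).
 * input_var, offset, and num_components are placeholders. */
nir_intrinsic_instr *interp =
   nir_intrinsic_instr_create(shader, nir_intrinsic_interp_var_at_offset);
interp->num_components = num_components;          /* width of the input */
interp->variables[0] = nir_deref_var_create(interp, input_var);
interp->src[0] = nir_src_for_ssa(offset);         /* ARR(2): vec2 offset */
nir_ssa_dest_init(&interp->instr, &interp->dest, num_components, NULL);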
/*
* a barrier is an intrinsic with no inputs/outputs but which can't be moved
SYSTEM_VALUE(sample_mask_in, 1)
SYSTEM_VALUE(invocation_id, 1)
-#define LOAD(name, num_indices, flags) \
- INTRINSIC(load_##name##_vec1, 0, ARR(), true, 1, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec2, 0, ARR(), true, 2, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec3, 0, ARR(), true, 3, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec4, 0, ARR(), true, 4, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec1_indirect, 1, ARR(1), true, 1, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec2_indirect, 1, ARR(1), true, 2, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec3_indirect, 1, ARR(1), true, 3, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
- INTRINSIC(load_##name##_vec4_indirect, 1, ARR(1), true, 4, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | flags) \
-
-
/*
* The first index is the address to load from, and the second index is the
- * number of array elements to load. For UBO's (and SSBO's), the first index
- * is the UBO buffer index (TODO nonconstant UBO buffer index) and the second
- * and third indices play the role of the first and second indices in the other
- * loads. Indirect loads have an additional register input, which is added
- * to the constant address to compute the final address to load from.
+ * number of array elements to load. Indirect loads have an additional
+ * register input, which is added to the constant address to compute the
+ * final address to load from. For UBOs (and SSBOs), the first source is
+ * the (possibly constant) UBO buffer index, and the indirect offset (if
+ * present) is the second source.
*
* For vector backends, the address is in terms of one vec4, and so each array
 * element is +4 scalar components from the previous array element. For scalar
 * backends, the address is in terms of one float, and so array
 * elements begin immediately after the previous array element.
*/
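
From a backend's point of view, the rule above reduces to the following
sketch (const_index and src are real nir_intrinsic_instr fields;
read_scalar() and is_indirect_variant are made-up placeholders):

unsigned base  = instr->const_index[0];  /* constant base address */
unsigned count = instr->const_index[1];  /* number of array elements */
unsigned addr  = base;
if (is_indirect_variant)                 /* load_*_indirect only */
   addr += read_scalar(&instr->src[0]);  /* runtime offset added to base */
/* For load_ubo*, src[0] is the buffer index instead, and the indirect
 * (when present) is src[1]. */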
-LOAD(uniform, 2, NIR_INTRINSIC_CAN_REORDER)
-LOAD(ubo, 3, NIR_INTRINSIC_CAN_REORDER)
-LOAD(input, 2, NIR_INTRINSIC_CAN_REORDER)
-/* LOAD(ssbo, 2, 0) */
+#define LOAD(name, extra_srcs, flags) \
+ INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 2, flags) \
+ INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
+ true, 0, 0, 2, flags)
-#define STORE(name, num_indices, flags) \
- INTRINSIC(store_##name##_vec1, 1, ARR(1), false, 0, 0, num_indices, flags) \
- INTRINSIC(store_##name##_vec2, 1, ARR(2), false, 0, 0, num_indices, flags) \
- INTRINSIC(store_##name##_vec3, 1, ARR(3), false, 0, 0, num_indices, flags) \
- INTRINSIC(store_##name##_vec4, 1, ARR(4), false, 0, 0, num_indices, flags) \
- INTRINSIC(store_##name##_vec1_indirect, 2, ARR(1, 1), false, 0, 0, \
- num_indices, flags) \
- INTRINSIC(store_##name##_vec2_indirect, 2, ARR(2, 1), false, 0, 0, \
- num_indices, flags) \
- INTRINSIC(store_##name##_vec3_indirect, 2, ARR(3, 1), false, 0, 0, \
- num_indices, flags) \
- INTRINSIC(store_##name##_vec4_indirect, 2, ARR(4, 1), false, 0, 0, \
- num_indices, flags) \
+LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* LOAD(ssbo, 1, 0) */
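
For reference, expanding LOAD(ubo, 1, ...) with the macro above gives:

INTRINSIC(load_ubo, 1, ARR(1), true, 0, 0, 2,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(load_ubo_indirect, 2, ARR(1, 1), true, 0, 0, 2,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

so load_ubo's single 1-component source is the buffer index, and the
indirect variant appends a second 1-component source for the offset.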
/*
 * Stores work the same way as loads, except now the first register input is
 * the value or array to store and the optional second input is the indirect
 * offset.
*/
+#define STORE(name, num_indices, flags) \
+ INTRINSIC(store_##name, 1, ARR(0), false, 0, 0, num_indices, flags) \
+ INTRINSIC(store_##name##_indirect, 2, ARR(0, 1), false, 0, 0, \
+ num_indices, flags)
+
STORE(output, 2, 0)
/* STORE(ssbo, 3, 0) */
-LAST_INTRINSIC(store_output_vec4_indirect)
+LAST_INTRINSIC(store_output_indirect)
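
For reference, STORE(output, 2, 0) expands to:

INTRINSIC(store_output, 1, ARR(0), false, 0, 0, 2, 0)
INTRINSIC(store_output_indirect, 2, ARR(0, 1), false, 0, 0, 2, 0)

The 0 in ARR(0) means the value source takes its width from the
instruction's num_components, which is what lets the single store_output
replace the old store_output_vec1..vec4 opcodes.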