*/
#define BARRIER(name) INTRINSIC(name, 0, ARR(), false, 0, 0, 0, 0)
+BARRIER(barrier)
BARRIER(discard)
+/*
+ * Memory barrier with semantics analogous to the memoryBarrier() GLSL
+ * intrinsic.
+ */
+BARRIER(memory_barrier)
+
+/** A conditional discard, with a single boolean source. */
+INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, 0)
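+/*
+ * For example (illustrative only), the GLSL sequence "if (cond) discard;"
+ * can be expressed as a single discard_if intrinsic whose boolean source
+ * is the value of cond.
+ */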
+
INTRINSIC(emit_vertex, 0, ARR(), false, 0, 0, 1, 0)
INTRINSIC(end_primitive, 0, ARR(), false, 0, 0, 1, 0)
ATOMIC(dec, 0)
ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)
+/*
+ * Image load, store and atomic intrinsics.
+ *
+ * All image intrinsics take an image target passed as a nir_variable. Image
+ * variables contain a number of memory and layout qualifiers that influence
+ * the semantics of the intrinsic.
+ *
+ * All image intrinsics except image_size take a four-coordinate vector and
+ * a sample index as their first two sources, determining the location within
+ * the image that will be accessed. Components not applicable to the image
+ * target in use are undefined. Image store takes an additional four-component
+ * argument with the value to be written, and image atomic operations take
+ * either one or two additional scalar arguments with the same meaning as in
+ * the ARB_shader_image_load_store specification.
+ */
+INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0,
+ NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, 0)
+INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, 0)
+INTRINSIC(image_size, 0, ARR(), true, 4, 1, 0,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
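+/*
+ * Illustrative example (not normative): the GLSL call
+ * imageAtomicAdd(img, ivec2(x, y), d) could be lowered to an
+ * image_atomic_add intrinsic referencing the nir_variable for img, with a
+ * first source of <x, y, undef, undef>, a second (here unused) sample index
+ * source, and a third scalar source holding d.
+ */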
+
#define SYSTEM_VALUE(name, components) \
INTRINSIC(load_##name, 0, ARR(), true, components, 0, 0, \
NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
SYSTEM_VALUE(front_face, 1)
SYSTEM_VALUE(vertex_id, 1)
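+/*
+ * Note (assuming GL draw semantics): vertex_id includes the base-vertex
+ * offset, i.e. vertex_id == vertex_id_zero_base + base_vertex.
+ */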
+SYSTEM_VALUE(vertex_id_zero_base, 1)
+SYSTEM_VALUE(base_vertex, 1)
SYSTEM_VALUE(instance_id, 1)
SYSTEM_VALUE(sample_id, 1)
SYSTEM_VALUE(sample_pos, 2)
SYSTEM_VALUE(invocation_id, 1)
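+/*
+ * For instance, SYSTEM_VALUE(sample_id, 1) above expands to
+ *
+ *    INTRINSIC(load_sample_id, 0, ARR(), true, 1, 0, 0,
+ *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ *
+ * i.e. a zero-source intrinsic with a single-component destination that may
+ * be freely eliminated and reordered.
+ */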
/*
- * The first index is the address to load from, and the second index is the
- * number of array elements to load. Indirect loads have an additional
- * register input, which is added to the constant address to compute the
- * final address to load from. For UBO's (and SSBO's), the first source is
- * the (possibly constant) UBO buffer index and the indirect (if it exists)
- * is the second source.
+ * The format of the indices depends on the type of the load. For uniforms,
+ * the first index is the base address and the second index is an offset that
+ * should be added to the base address. (This way you can determine in the
+ * back-end which variable is being accessed even in an array.) For inputs,
+ * the one and only index corresponds to the attribute slot. UBO loads also
+ * have a single index which is the base address to load from.
+ *
+ * UBO loads have a (possibly constant) source which is the UBO buffer index.
+ * For each type of load, the _indirect variant has one additional source
+ * (the second in the case of UBO's) that is an indirect to be added to
+ * the constant address or base offset to compute the final offset.
*
* For vector backends, the address is in terms of one vec4, and so each array
 * element is +4 scalar components from the previous array element. For scalar
 * backends, the address is in terms of one scalar, and so array
 * elements begin immediately after the previous array element.
*/
-#define LOAD(name, extra_srcs, flags) \
- INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 2, flags) \
+#define LOAD(name, extra_srcs, indices, flags) \
+ INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, indices, flags) \
INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
- true, 0, 0, 2, flags)
+ true, 0, 0, indices, flags)
-LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(ubo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(input, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
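+/*
+ * Illustrative example: a constant access to element 2 of a uniform array
+ * could become load_uniform with indices <base, 2> and no sources, while a
+ * dynamic access arr[i] could become load_uniform_indirect with indices
+ * <base, 0> and a single source holding i, which is added to the base and
+ * offset to compute the final offset.
+ */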
-/* LOAD(ssbo, 1, 0) */
+/* LOAD(ssbo, 1, 1, 0) */
/*
 * Stores work the same way as loads, except that the first source is the
 * value to store and the optional second source is the indirect to store
 * to; the base address/index information is given as intrinsic indices
 * rather than as a source.
 */
#define STORE(name, num_indices, flags) \
   INTRINSIC(store_##name, 1, ARR(0), false, 0, 0, num_indices, flags) \
   INTRINSIC(store_##name##_indirect, 2, ARR(0, 1), false, 0, 0, \
             num_indices, flags)
-STORE(output, 2, 0)
-/* STORE(ssbo, 3, 0) */
+STORE(output, 1, 0)
+/* STORE(ssbo, 2, 0) */
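+/*
+ * For example (illustrative), writing a shader output could become a
+ * store_output whose single index is the output's base location and whose
+ * first source is the value to store; for store_output_indirect, the second
+ * source is the indirect offset.
+ */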
LAST_INTRINSIC(store_output_indirect)