#define ARR(...) { __VA_ARGS__ }
+INTRINSIC(nop, 0, ARR(0), false, 0, 0, 0, xx, xx, xx,
+ NIR_INTRINSIC_CAN_ELIMINATE)
-INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(load_var, 0, ARR(0), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, WRMASK, xx, xx, 0)
-INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, xx, xx, xx, 0)
+INTRINSIC(copy_var, 0, ARR(0), false, 0, 2, 0, xx, xx, xx, 0)
/*
* Interpolation of input. The interp_var_at* intrinsics are similar to the
- * load_var intrinsic acting an a shader input except that they interpolate
+ * load_var intrinsic acting on a shader input except that they interpolate
* the input differently. The at_sample and at_offset intrinsics take an
- * aditional source that is a integer sample id or a vec2 position offset
+ * additional source that is an integer sample id or a vec2 position offset
* respectively.
*/
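+/*
+ * For example, GLSL's interpolateAtSample(v, id) would roughly map to an
+ * interp_var_at_sample intrinsic whose variable is the shader input v and
+ * whose single source is the integer sample id.
+ */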
* a barrier is an intrinsic with no inputs/outputs but which can't be moved
* around/optimized in general
*/
-#define BARRIER(name) INTRINSIC(name, 0, ARR(), false, 0, 0, 0, xx, xx, xx, 0)
+#define BARRIER(name) INTRINSIC(name, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
BARRIER(barrier)
BARRIER(discard)
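+/*
+ * As a worked expansion, BARRIER(discard) above becomes
+ *
+ *    INTRINSIC(discard, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
+ *
+ * i.e. no sources, no destination, and no flags, so the optimizer may
+ * neither eliminate nor reorder it.
+ */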
* The latter can be used as a code motion barrier, which is currently not
* feasible with NIR.
*/
-INTRINSIC(shader_clock, 0, ARR(), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(shader_clock, 0, ARR(0), true, 2, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+
+/*
+ * Shader ballot intrinsics with semantics analogous to the
+ *
+ * ballotARB()
+ * readInvocationARB()
+ * readFirstInvocationARB()
+ *
+ * GLSL functions from ARB_shader_ballot.
+ */
+INTRINSIC(ballot, 1, ARR(1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(read_invocation, 2, ARR(0, 1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(read_first_invocation, 1, ARR(0), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
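+
+/*
+ * For example, readInvocationARB(x, i) would roughly lower to a
+ * read_invocation intrinsic with src[] = { x, i }, where x may have any
+ * number of components (the 0 in ARR(0, 1)) and i is a single invocation
+ * index.
+ */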
+
+/** Additional SPIR-V ballot intrinsics
+ *
+ * These correspond to the SPIR-V opcodes
+ *
+ * OpGroupUniformElect
+ * OpSubgroupFirstInvocationKHR
+ */
+INTRINSIC(elect, 0, ARR(0), true, 1, 0, 0, xx, xx, xx,
+ NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(first_invocation, 0, ARR(0), true, 1, 0, 0, xx, xx, xx,
+ NIR_INTRINSIC_CAN_ELIMINATE)
/*
* Memory barrier with semantics analogous to the compute shader
/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)
+/** ARB_shader_group_vote intrinsics */
+INTRINSIC(vote_any, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(vote_all, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(vote_feq, 1, ARR(0), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(vote_ieq, 1, ARR(0), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
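+
+/*
+ * For example, GLSL's anyInvocationARB(b) would roughly lower to vote_any
+ * with the boolean b as its single source, yielding one boolean result
+ * that is uniform across the group.
+ */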
+
+/** Ballot ALU operations from SPIR-V.
+ *
+ * These operations work like their ALU counterparts except that they
+ * operate on a uvec4 which is treated as a 128-bit integer. Also, they
+ * are, in general, free to ignore any bits above the subgroup size.
+ */
+INTRINSIC(ballot_bitfield_extract, 2, ARR(4, 1), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(ballot_bit_count_reduce, 1, ARR(4), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(ballot_bit_count_inclusive, 1, ARR(4), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(ballot_bit_count_exclusive, 1, ARR(4), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(ballot_find_lsb, 1, ARR(4), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(ballot_find_msb, 1, ARR(4), true, 1, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
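+
+/*
+ * As an illustrative idiom, ballot_bit_count_exclusive applied to the
+ * ballot of a true condition counts the set bits below the current
+ * invocation, i.e. it gives the invocation's rank among the active
+ * invocations.
+ */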
+
+/** Shuffle operations from SPIR-V. */
+INTRINSIC(shuffle, 2, ARR(0, 1), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(shuffle_xor, 2, ARR(0, 1), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(shuffle_up, 2, ARR(0, 1), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+INTRINSIC(shuffle_down, 2, ARR(0, 1), true, 0, 0,
+ 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
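+
+/*
+ * For example, shuffle_xor with an index source of 1 exchanges values
+ * between neighboring invocations, which is the usual building block of
+ * log2-step butterfly subgroup reductions.
+ */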
+
/**
* Basic Geometry Shader intrinsics.
*
*
* end_primitive implements GLSL's EndPrimitive() built-in.
*/
-INTRINSIC(emit_vertex, 0, ARR(), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
-INTRINSIC(end_primitive, 0, ARR(), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
+INTRINSIC(emit_vertex, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
+INTRINSIC(end_primitive, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
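+
+/*
+ * For example, GLSL's EmitStreamVertex(1) would roughly become emit_vertex
+ * with const_index[0] (STREAM_ID) = 1; plain EmitVertex() uses stream 0.
+ */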
/**
* Geometry Shader intrinsics with a vertex count.
*/
#define ATOMIC(name, flags) \
- INTRINSIC(atomic_counter_##name##_var, 0, ARR(), true, 1, 1, 0, xx, xx, xx, flags) \
- INTRINSIC(atomic_counter_##name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)
-
-ATOMIC(inc, 0)
-ATOMIC(dec, 0)
-ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)
+ INTRINSIC(name##_var, 0, ARR(0), true, 1, 1, 0, xx, xx, xx, flags) \
+ INTRINSIC(name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)
+#define ATOMIC2(name) \
+ INTRINSIC(name##_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0) \
+ INTRINSIC(name, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+#define ATOMIC3(name) \
+ INTRINSIC(name##_var, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0) \
+ INTRINSIC(name, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+
+ATOMIC(atomic_counter_inc, 0)
+ATOMIC(atomic_counter_dec, 0)
+ATOMIC(atomic_counter_read, NIR_INTRINSIC_CAN_ELIMINATE)
+ATOMIC2(atomic_counter_add)
+ATOMIC2(atomic_counter_min)
+ATOMIC2(atomic_counter_max)
+ATOMIC2(atomic_counter_and)
+ATOMIC2(atomic_counter_or)
+ATOMIC2(atomic_counter_xor)
+ATOMIC2(atomic_counter_exchange)
+ATOMIC3(atomic_counter_comp_swap)
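+
+/*
+ * As a worked expansion, ATOMIC2(atomic_counter_add) defines both forms of
+ * the intrinsic:
+ *
+ *    INTRINSIC(atomic_counter_add_var, 1, ARR(1), true, 1, 1, 0,
+ *              xx, xx, xx, 0)
+ *    INTRINSIC(atomic_counter_add, 2, ARR(1, 1), true, 1, 0, 1,
+ *              BASE, xx, xx, 0)
+ *
+ * i.e. a variable-based form and an offset-based form that carries the
+ * counter's base as a constant index.
+ */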
/*
* Image load, store and atomic intrinsics.
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
-INTRINSIC(image_size, 0, ARR(), true, 4, 1, 0, xx, xx, xx,
+INTRINSIC(image_size, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-INTRINSIC(image_samples, 0, ARR(), true, 1, 1, 0, xx, xx, xx,
+INTRINSIC(image_samples, 0, ARR(0), true, 1, 1, 0, xx, xx, xx,
NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/*
- * Vulkan descriptor set intrinsic
+ * Vulkan descriptor set intrinsics
*
* The Vulkan API uses a different binding model from GL. In the Vulkan
- * API, all external resources are represented by a tripple:
+ * API, all external resources are represented by a tuple:
*
* (descriptor set, binding, array index)
*
*
* The intended usage is that the shader will call vulkan_resource_index to
* get an index and then pass that as the buffer index to ubo/ssbo calls.
+ *
+ * The vulkan_resource_reindex intrinsic takes a resource index in src0
+ * (the result of a vulkan_resource_index or vulkan_resource_reindex) which
+ * corresponds to the tuple (set, binding, index) and computes an index
+ * corresponding to the tuple (set, binding, index + src1).
*/
INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
DESC_SET, BINDING, xx,
NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+INTRINSIC(vulkan_resource_reindex, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
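+
+/*
+ * For example, a load from an SSBO declared with
+ * layout(set = 0, binding = 2) and indexed by i would roughly become
+ *
+ *    block = vulkan_resource_index(i)    (DESC_SET = 0, BINDING = 2)
+ *    value = load_ssbo(block, offset)
+ */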
/*
* variable atomic intrinsics
* in shared_atomic_add, etc).
* 2: For CompSwap only: the second data parameter.
*/
-INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
-INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
+INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
+
+/* Used by nir_builder.h to generate loader helpers for the system values. */
+#ifndef DEFINE_SYSTEM_VALUE
+#define DEFINE_SYSTEM_VALUE(name)
+#endif
#define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \
- INTRINSIC(load_##name, 0, ARR(), true, components, 0, num_indices, \
+ DEFINE_SYSTEM_VALUE(name) \
+ INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
idx0, idx1, idx2, \
NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
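+
+/*
+ * A consumer such as nir_builder.h might define DEFINE_SYSTEM_VALUE
+ * roughly like this (sketch only) to get one load helper per system value:
+ *
+ *    #define DEFINE_SYSTEM_VALUE(name)                             \
+ *       static inline nir_ssa_def *                                \
+ *       nir_load_##name(nir_builder *build)                        \
+ *       {                                                          \
+ *          return nir_load_system_value(build,                     \
+ *                   nir_intrinsic_load_##name, 0);                 \
+ *       }
+ */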
+SYSTEM_VALUE(frag_coord, 4, 0, xx, xx, xx)
SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_inner, 2, 0, xx, xx, xx)
SYSTEM_VALUE(patch_vertices_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_id, 3, 0, xx, xx, xx)
+SYSTEM_VALUE(local_invocation_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(work_group_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(user_clip_plane, 4, 1, UCP_ID, xx, xx)
SYSTEM_VALUE(num_work_groups, 3, 0, xx, xx, xx)
SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(alpha_ref_float, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(layer_id, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(view_index, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_size, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_invocation, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_eq_mask, 0, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_ge_mask, 0, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_gt_mask, 0, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_le_mask, 0, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_lt_mask, 0, 0, xx, xx, xx)
+SYSTEM_VALUE(num_subgroups, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(subgroup_id, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(local_group_size, 3, 0, xx, xx, xx)
+
+/* Blend constant color values. Float values are clamped. */
+SYSTEM_VALUE(blend_const_color_r_float, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(blend_const_color_g_float, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(blend_const_color_b_float, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(blend_const_color_a_float, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(blend_const_color_rgba8888_unorm, 1, 0, xx, xx, xx)
+SYSTEM_VALUE(blend_const_color_aaaa8888_unorm, 1, 0, xx, xx, xx)
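+
+/*
+ * E.g. the rgba8888_unorm variant is presumably the four clamped channels
+ * packed as 8-bit unorm values into a single 32-bit word, and the aaaa
+ * variant replicates the alpha channel into all four bytes.
+ */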
+
+/**
+ * Barycentric coordinate intrinsics.
+ *
+ * These set up the barycentric coordinates for a particular interpolation.
+ * The first three are for the simple cases: pixel, centroid, or per-sample
+ * (at gl_SampleID). The next two handle interpolating at a specified
+ * sample location, or interpolating with a vec2 offset.
+ *
+ * The interp_mode index should be either INTERP_MODE_SMOOTH or
+ * INTERP_MODE_NOPERSPECTIVE.
+ *
+ * The vec2 value produced by these intrinsics is intended for use as the
+ * barycoord source of a load_interpolated_input intrinsic.
+ */
+
+#define BARYCENTRIC(name, sources, source_components) \
+ INTRINSIC(load_barycentric_##name, sources, ARR(source_components), \
+ true, 2, 0, 1, INTERP_MODE, xx, xx, \
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+
+/* no sources. const_index[] = { interp_mode } */
+BARYCENTRIC(pixel, 0, 0)
+BARYCENTRIC(centroid, 0, 0)
+BARYCENTRIC(sample, 0, 0)
+/* src[] = { sample_id }. const_index[] = { interp_mode } */
+BARYCENTRIC(at_sample, 1, 1)
+/* src[] = { offset.xy }. const_index[] = { interp_mode } */
+BARYCENTRIC(at_offset, 1, 2)
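+
+/*
+ * For example, interpolateAtOffset(v, off) on a fragment input would
+ * roughly lower to
+ *
+ *    bary = load_barycentric_at_offset(off)      (interp_mode index)
+ *    val  = load_interpolated_input(bary, 0)     (base, component indices)
+ */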
/*
* Load operations pull data from some piece of GPU memory. All load
* of the start of the variable being loaded and the offset source is an
* offset into that variable.
*
- * Uniform load operations have a second index that specifies the size of the
- * variable being loaded. If const_index[1] == 0, then the size is unknown.
+ * Uniform load operations have a second "range" index that specifies the
+ * range (starting at base) of the data from which we are loading. If
+ * const_index[1] == 0, then the range is unknown.
*
* Some load operations such as UBO/SSBO load and per_vertex loads take an
* additional source to specify which UBO/SSBO/vertex to load from.
INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
/* src[] = { offset }. const_index[] = { base, range } */
-LOAD(uniform, 1, 2, BASE, RANGE, xx,
- NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(uniform, 1, 2, BASE, RANGE, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
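+/*
+ * For example, a load_uniform with const_index[] = { base, range } reads
+ * from the uniform storage starting at base plus its offset source; unless
+ * range is 0, the access is known to stay within [base, base + range).
+ */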
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-/* src[] = { offset }. const_index[] = { base } */
-LOAD(input, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
-/* src[] = { vertex, offset }. const_index[] = { base } */
-LOAD(per_vertex_input, 2, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { offset }. const_index[] = { base, component } */
+LOAD(input, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { vertex, offset }. const_index[] = { base, component } */
+LOAD(per_vertex_input, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { barycoord, offset }. const_index[] = { base, component } */
+INTRINSIC(load_interpolated_input, 2, ARR(2, 1), true, 0, 0,
+ 2, BASE, COMPONENT, xx,
+ NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
-/* src[] = { offset }. const_index[] = { base } */
-LOAD(output, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
-/* src[] = { vertex, offset }. const_index[] = { base } */
-LOAD(per_vertex_output, 2, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base, component } */
+LOAD(output, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { vertex, offset }. const_index[] = { base, component } */
+LOAD(per_vertex_output, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, range } */
#define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \
INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)
-/* src[] = { value, offset }. const_index[] = { base, write_mask } */
-STORE(output, 2, 2, BASE, WRMASK, xx, 0)
-/* src[] = { value, vertex, offset }. const_index[] = { base, write_mask } */
-STORE(per_vertex_output, 3, 2, BASE, WRMASK, xx, 0)
+/* src[] = { value, offset }. const_index[] = { base, write_mask, component } */
+STORE(output, 2, 3, BASE, WRMASK, COMPONENT, 0)
+/* src[] = { value, vertex, offset }.
+ * const_index[] = { base, write_mask, component }
+ */
+STORE(per_vertex_output, 3, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, WRMASK, xx, xx, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, BASE, WRMASK, xx, 0)
LAST_INTRINSIC(store_shared)
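+
+/*
+ * As a worked expansion, STORE(shared, 2, 2, BASE, WRMASK, xx, 0) above
+ * becomes
+ *
+ *    INTRINSIC(store_shared, 2, ARR(0, 1, 1, 1), false, 0, 0, 2,
+ *              BASE, WRMASK, xx, 0)
+ *
+ * i.e. a value source of any size, a single-component offset, no
+ * destination, and base/write_mask constant indices.
+ */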
+
+#undef DEFINE_SYSTEM_VALUE
+#undef INTRINSIC
+#undef LAST_INTRINSIC
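+
+/*
+ * This header is an X-macro list: INTRINSIC and LAST_INTRINSIC are
+ * expected to be defined by the includer (DEFINE_SYSTEM_VALUE defaults to
+ * empty above). A typical consumer might build the opcode enum roughly
+ * like this (sketch only):
+ *
+ *    #define INTRINSIC(name, num_srcs, src_components, has_dest,    \
+ *                      dest_components, num_variables, num_indices, \
+ *                      idx0, idx1, idx2, flags)                     \
+ *       nir_intrinsic_##name,
+ *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
+ *
+ *    typedef enum {
+ *    #include "nir_intrinsics.h"
+ *    } nir_intrinsic_op;
+ */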