IMAGE_ARRAY = "NIR_INTRINSIC_IMAGE_ARRAY"
# Access qualifiers for image and memory access intrinsics
ACCESS = "NIR_INTRINSIC_ACCESS"
+DST_ACCESS = "NIR_INTRINSIC_DST_ACCESS"
+SRC_ACCESS = "NIR_INTRINSIC_SRC_ACCESS"
# Image format for image intrinsics
FORMAT = "NIR_INTRINSIC_FORMAT"
# Offset or address alignment
ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
# The vulkan descriptor type for vulkan_resource_index
DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"
+# The nir_alu_type of a uniform/input/output
+TYPE = "NIR_INTRINSIC_TYPE"
+# The swizzle mask for quad_swizzle_amd & masked_swizzle_amd
+SWIZZLE_MASK = "NIR_INTRINSIC_SWIZZLE_MASK"
+# Driver location of attribute
+DRIVER_LOCATION = "NIR_INTRINSIC_DRIVER_LOCATION"
+# Ordering and visibility of a memory operation
+MEMORY_SEMANTICS = "NIR_INTRINSIC_MEMORY_SEMANTICS"
+# Modes affected by a memory operation
+MEMORY_MODES = "NIR_INTRINSIC_MEMORY_MODES"
+# Scope of a memory operation
+MEMORY_SCOPE = "NIR_INTRINSIC_MEMORY_SCOPE"
#
# Possible flags:
INTR_OPCODES = {}
+# Defines a new NIR intrinsic. By default, the intrinsic will have no sources
+# and no destination.
+#
+# You can set dest_comp=n to enable a destination for the intrinsic, in which
+# case it will have that many components, or =0 for "as many components as the
+# NIR destination value."
+#
+# Set src_comp=n to enable sources for the instruction. It can be an array of
+# component counts, or (for convenience) a scalar component count if there's
+# only one source. If a component count is 0, it will be as many components as
+# the intrinsic has based on the dest_comp.
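+#
+# A purely illustrative example (hypothetical, not a real NIR opcode, and
+# left commented out so it registers nothing in INTR_OPCODES):
+#
+#   intrinsic("example_op", src_comp=[2, 0], dest_comp=0,
+#             indices=[BASE], flags=[CAN_ELIMINATE])
+#
+# This would declare an intrinsic with a 2-component first source, a second
+# source sized to match the destination, a destination as wide as the NIR
+# value it writes, a BASE index, and the CAN_ELIMINATE flag.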
def intrinsic(name, src_comp=[], dest_comp=-1, indices=[],
flags=[], sysval=False, bit_sizes=[]):
assert name not in INTR_OPCODES
intrinsic("load_deref", dest_comp=0, src_comp=[-1],
indices=[ACCESS], flags=[CAN_ELIMINATE])
intrinsic("store_deref", src_comp=[-1, 0], indices=[WRMASK, ACCESS])
-intrinsic("copy_deref", src_comp=[-1, -1])
+intrinsic("copy_deref", src_comp=[-1, -1], indices=[DST_ACCESS, SRC_ACCESS])
# Interpolation of input. The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
barrier("barrier")
barrier("discard")
+# Demote fragment shader invocation to a helper invocation. Any stores to
+# memory after this instruction are suppressed and the fragment does not write
+# outputs to the framebuffer. Unlike discard, demote needs to ensure that
+# derivatives will still work for invocations that were not demoted.
+#
+# As specified by SPV_EXT_demote_to_helper_invocation.
+barrier("demote")
+intrinsic("is_helper_invocation", dest_comp=1, flags=[CAN_ELIMINATE])
+
+
# Memory barrier with semantics analogous to the memoryBarrier() GLSL
# intrinsic.
barrier("memory_barrier")
+# Memory barrier with explicit scope. Follows the semantics of SPIR-V
+# OpMemoryBarrier, used to implement the Vulkan Memory Model. Storage that
+# the barrier applies to is represented using NIR variable modes.
+intrinsic("scoped_memory_barrier",
+ indices=[MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])
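+#
+# As an illustrative assumption: GLSL's memoryBarrier() would correspond to
+# a scoped_memory_barrier with acquire+release semantics at device scope,
+# covering the SSBO, shared, and image variable modes.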
+
# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.
# The latter can be used as a code motion barrier, which is currently not
barrier("begin_invocation_interlock")
barrier("end_invocation_interlock")
-# A conditional discard, with a single boolean source.
+# A conditional discard/demote, with a single boolean source.
intrinsic("discard_if", src_comp=[1])
+intrinsic("demote_if", src_comp=[1])
# ARB_shader_group_vote intrinsics
intrinsic("vote_any", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("exclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
flags=[CAN_ELIMINATE])
+# AMD shader ballot operations
+intrinsic("quad_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
+ flags=[CAN_ELIMINATE])
+intrinsic("masked_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
+ flags=[CAN_ELIMINATE])
+intrinsic("write_invocation_amd", src_comp=[0, 0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
+intrinsic("mbcnt_amd", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
+
# Basic Geometry Shader intrinsics.
#
# emit_vertex implements GLSL's EmitStreamVertex() built-in. It takes a single
# Image load, store and atomic intrinsics.
#
-# All image intrinsics come in two versions. One which take an image target
-# passed as a deref chain as the first source and one which takes an index or
-# handle as the first source. In the first version, the image variable
-# contains the memory and layout qualifiers that influence the semantics of
-# the intrinsic. In the second, the image format and access qualifiers are
-# provided as constant indices.
+# All image intrinsics come in three versions. One which takes an image target
+# passed as a deref chain as the first source, one which takes an index as the
+# first source, and one which takes a bindless handle as the first source.
+# In the first version, the image variable contains the memory and layout
+# qualifiers that influence the semantics of the intrinsic. In the second and
+# third, the image format and access qualifiers are provided as constant
+# indices.
#
# All image intrinsics take a four-coordinate vector and a sample index as
# 2nd and 3rd sources, determining the location within the image that will be
# accessed by the intrinsic. Components not applicable to the image target
# in use are undefined. Image store takes an additional four-component
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
def image(name, src_comp=[], **kwargs):
- intrinsic("image_deref_" + name, src_comp=[1] + src_comp, **kwargs)
+ intrinsic("image_deref_" + name, src_comp=[1] + src_comp,
+ indices=[ACCESS], **kwargs)
intrinsic("image_" + name, src_comp=[1] + src_comp,
indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
+ intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
+ indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
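+# Thus image("load", ...) below defines nir_intrinsic_image_deref_load,
+# nir_intrinsic_image_load, and nir_intrinsic_bindless_image_load, differing
+# only in how src[0] identifies the image.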
image("load", src_comp=[4, 1], dest_comp=0, flags=[CAN_ELIMINATE])
image("store", src_comp=[4, 1, 0])
image("atomic_add", src_comp=[4, 1, 1], dest_comp=1)
-image("atomic_min", src_comp=[4, 1, 1], dest_comp=1)
-image("atomic_max", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_imax", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_umax", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_and", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_or", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_xor", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_fadd", src_comp=[1, 4, 1, 1], dest_comp=1)
image("size", dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER])
image("samples", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
+image("atomic_inc_wrap", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_dec_wrap", src_comp=[4, 1, 1], dest_comp=1)
# Intel-specific query for loading from the brw_image_param struct passed
# into the shader as a uniform. The variable is a deref to the image
# 1: The data parameter to the atomic function (i.e. the value to add
# in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
-intrinsic("deref_atomic_add", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_imin", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_umin", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_imax", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_umax", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_and", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_or", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_xor", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_exchange", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_comp_swap", src_comp=[-1, 1, 1], dest_comp=1)
-intrinsic("deref_atomic_fadd", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_fmin", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_fmax", src_comp=[-1, 1], dest_comp=1)
-intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1)
+intrinsic("deref_atomic_add", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_imin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_umin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_imax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_umax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_and", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_or", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_xor", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_exchange", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_comp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_fadd", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_fmin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_fmax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
# SSBO atomic intrinsics
#
# 2: The data parameter to the atomic function (i.e. the value to add
# in ssbo_atomic_add, etc).
# 3: For CompSwap only: the second data parameter.
-intrinsic("ssbo_atomic_add", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_imin", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_umin", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_imax", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_umax", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_and", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_or", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_xor", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_exchange", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_comp_swap", src_comp=[1, 1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_fadd", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_fmin", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_fmax", src_comp=[1, 1, 1], dest_comp=1)
-intrinsic("ssbo_atomic_fcomp_swap", src_comp=[1, 1, 1, 1], dest_comp=1)
+intrinsic("ssbo_atomic_add", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_imin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_umin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_imax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_umax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_and", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_or", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_xor", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_exchange", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_comp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_fadd", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_fmin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_fmax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
+intrinsic("ssbo_atomic_fcomp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
# CS shared variable atomic intrinsics
#
bit_sizes=bit_sizes)
system_value("frag_coord", 4)
+system_value("point_coord", 2)
system_value("front_face", 1, bit_sizes=[1, 32])
system_value("vertex_id", 1)
system_value("vertex_id_zero_base", 1)
system_value("tess_coord", 3)
system_value("tess_level_outer", 4)
system_value("tess_level_inner", 2)
+system_value("tess_level_outer_default", 4)
+system_value("tess_level_inner_default", 2)
system_value("patch_vertices_in", 1)
system_value("local_invocation_id", 3)
system_value("local_invocation_index", 1)
# VC4 and V3D need to emit a scaled version of the position in the vertex
# shaders for binning, and having system values lets us move the math for that
# into NIR.
+#
+# Panfrost needs to implement all coordinate transformation in the
+# vertex shader; system values allow us to share this routine in NIR.
system_value("viewport_x_scale", 1)
system_value("viewport_y_scale", 1)
system_value("viewport_z_scale", 1)
system_value("viewport_z_offset", 1)
+system_value("viewport_scale", 3)
+system_value("viewport_offset", 3)
+
-# Blend constant color values. Float values are clamped.
+# Blend constant color values. Float values are clamped. Vectored versions are
+# provided as well for driver convenience.
system_value("blend_const_color_r_float", 1)
system_value("blend_const_color_g_float", 1)
system_value("blend_const_color_b_float", 1)
system_value("blend_const_color_a_float", 1)
+system_value("blend_const_color_rgba", 4)
system_value("blend_const_color_rgba8888_unorm", 1)
system_value("blend_const_color_aaaa8888_unorm", 1)
+# System values for gl_Color, for radeonsi, which interpolates these in the
+# shader prolog to handle two-sided color without recompiles and therefore
+# doesn't handle them in the main shader part like normal varyings.
+system_value("color0", 4)
+system_value("color1", 4)
+
+# System value for internal compute shaders in radeonsi.
+system_value("user_data_amd", 4)
+
# Barycentric coordinate intrinsics.
#
# These set up the barycentric coordinates for a particular interpolation.
intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=2,
indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER])
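+# For example, barycentric("pixel") below defines
+# nir_intrinsic_load_barycentric_pixel: no sources, a 2-component
+# destination, and an INTERP_MODE index.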
-# no sources. const_index[] = { interp_mode }
+# no sources.
barycentric("pixel")
barycentric("centroid")
barycentric("sample")
-# src[] = { sample_id }. const_index[] = { interp_mode }
+# src[] = { sample_id }.
barycentric("at_sample", [1])
-# src[] = { offset.xy }. const_index[] = { interp_mode }
+# src[] = { offset.xy }.
barycentric("at_offset", [2])
+# Load sample position:
+#
+# Takes a sample # and returns a sample position. Used for lowering
+# interpolateAtSample() to interpolateAtOffset()
+intrinsic("load_sample_pos_from_id", src_comp=[1], dest_comp=2,
+ flags=[CAN_ELIMINATE, CAN_REORDER])
+
+# Loads what I believe is the primitive size, for scaling ij to pixel size:
+intrinsic("load_size_ir3", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
+
+# Fragment shader input interpolation delta intrinsic.
+#
+# For hw where fragment shader input interpolation is handled in shader, the
+# load_fs_input_interp_deltas intrinsic can be used to load the input deltas
+# used for interpolation as follows:
+#
+# vec3 iid = load_fs_input_interp_deltas(varying_slot)
+# vec2 bary = load_barycentric_*(...)
+# float result = iid.x + iid.y * bary.y + iid.z * bary.x
+
+intrinsic("load_fs_input_interp_deltas", src_comp=[1], dest_comp=3,
+ indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
+
# Load operations pull data from some piece of GPU memory. All load
# operations operate in terms of offsets into some piece of theoretical
# memory. Loads from externally visible memory (UBO and SSBO) simply take a
# byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
-# take a base+offset pair where the base (const_index[0]) gives the location
+# take a base+offset pair where the nir_intrinsic_base() gives the location
# of the start of the variable being loaded and the offset source is an
# offset into that variable.
#
-# Uniform load operations have a second "range" index that specifies the
+# Uniform load operations have a nir_intrinsic_range() index that specifies the
# range (starting at base) of the data from which we are loading. If
-# const_index[1] == 0, then the range is unknown.
+# range == 0, then the range is unknown.
#
# Some load operations such as UBO/SSBO load and per_vertex loads take an
# additional source to specify which UBO/SSBO/vertex to load from.
intrinsic("load_" + name, [1] * num_srcs, dest_comp=0, indices=indices,
flags=flags)
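+# For example, load("ubo", 2, ...) below defines nir_intrinsic_load_ubo with
+# two single-component sources (buffer_index and offset) and a destination
+# as wide as the loaded value.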
-# src[] = { offset }. const_index[] = { base, range }
-load("uniform", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
-# src[] = { buffer_index, offset }. const_index[] = { align_mul, align_offset }
-load("ubo", 2, [ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
-# src[] = { offset }. const_index[] = { base, component }
-load("input", 1, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
-# src[] = { vertex, offset }. const_index[] = { base, component }
+# src[] = { offset }.
+load("uniform", 1, [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER])
+# src[] = { buffer_index, offset }.
+load("ubo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
+# src[] = { offset }.
+load("input", 1, [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER])
+# src[] = { vertex, offset }.
load("per_vertex_input", 2, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
-# src[] = { barycoord, offset }. const_index[] = { base, component }
+# src[] = { barycoord, offset }.
intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset }.
-# const_index[] = { access, align_mul, align_offset }
load("ssbo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
-# src[] = { offset }. const_index[] = { base, component }
+# src[] = { offset }.
load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE])
-# src[] = { vertex, offset }. const_index[] = { base }
+# src[] = { vertex, offset }.
load("per_vertex_output", 2, [BASE, COMPONENT], [CAN_ELIMINATE])
-# src[] = { offset }. const_index[] = { base, align_mul, align_offset }
+# src[] = { offset }.
load("shared", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
-# src[] = { offset }. const_index[] = { base, range }
+# src[] = { offset }.
load("push_constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
-# src[] = { offset }. const_index[] = { base, range }
+# src[] = { offset }.
load("constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { address }.
-# const_index[] = { access, align_mul, align_offset }
load("global", 1, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
-# src[] = { address }. const_index[] = { base, range, align_mul, align_offset }
+# src[] = { address }.
load("kernel_input", 1, [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
+# src[] = { offset }.
+load("scratch", 1, [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# Stores work the same way as loads, except now the first source is the value
# to store and the second (and possibly third) source specify where to store
-# the value. SSBO and shared memory stores also have a write mask as
-# const_index[0].
+# the value. SSBO and shared memory stores also have a
+# nir_intrinsic_write_mask().
def store(name, num_srcs, indices=[], flags=[]):
intrinsic("store_" + name, [0] + ([1] * (num_srcs - 1)), indices=indices, flags=flags)
-# src[] = { value, offset }. const_index[] = { base, write_mask, component }
-store("output", 2, [BASE, WRMASK, COMPONENT])
+# src[] = { value, offset }.
+store("output", 2, [BASE, WRMASK, COMPONENT, TYPE])
# src[] = { value, vertex, offset }.
-# const_index[] = { base, write_mask, component }
store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT])
# src[] = { value, block_index, offset }
-# const_index[] = { write_mask, access, align_mul, align_offset }
store("ssbo", 3, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
-# const_index[] = { base, write_mask, align_mul, align_offset }
store("shared", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, address }.
-# const_index[] = { write_mask, align_mul, align_offset }
store("global", 2, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
-
+# src[] = { value, offset }.
+store("scratch", 2, [ALIGN_MUL, ALIGN_OFFSET, WRMASK])
# IR3-specific version of most SSBO intrinsics. The only difference
# compared to the originals is that they add an extra source to hold
intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1)
+
+# System values for freedreno geometry shaders.
+system_value("vs_primitive_stride_ir3", 1)
+system_value("vs_vertex_stride_ir3", 1)
+system_value("gs_header_ir3", 1)
+system_value("primitive_location_ir3", 1, indices=[DRIVER_LOCATION])
+
+# System values for freedreno tessellation shaders.
+system_value("hs_patch_stride_ir3", 1)
+system_value("tess_factor_base_ir3", 2)
+system_value("tess_param_base_ir3", 2)
+system_value("tcs_header_ir3", 1)
+
+# IR3-specific intrinsics for tessellation control shaders. cond_end_ir3 ends
+# the shader when src0 is false and is used to narrow down the TCS shader to
+# just thread 0 before writing out tessellation levels.
+intrinsic("cond_end_ir3", src_comp=[1])
+# end_patch_ir3 is used just before thread 0 exits the TCS and presumably
+# signals the TE that the patch is complete and can be tessellated.
+intrinsic("end_patch_ir3")
+
+# IR3-specific load/store intrinsics. These access a buffer used to pass data
+# between geometry stages - perhaps it's explicit access to the vertex cache.
+
+# src[] = { value, offset }.
+store("shared_ir3", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
+# src[] = { offset }.
+load("shared_ir3", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
+
+# IR3-specific load/store global intrinsics. They take a 64-bit base address
+# and a 32-bit offset. The hardware will add the base and the offset, which
+# saves us from doing 64-bit math on the base address.
+
+# src[] = { value, address(vec2 of hi+lo uint32_t), offset }.
+# const_index[] = { write_mask, align_mul, align_offset }
+intrinsic("store_global_ir3", [0, 2, 1], indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
+# src[] = { address(vec2 of hi+lo uint32_t), offset }.
+# const_index[] = { access, align_mul, align_offset }
+intrinsic("load_global_ir3", [2, 1], dest_comp=0, indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
+
+# Intrinsics used by the Midgard/Bifrost blend pipeline. These are defined
+# within a blend shader to read/write the raw value from the tile buffer,
+# without applying any format conversion in the process. If the shader needs
+# usable pixel values, it must apply format conversions itself.
+#
+# These definitions are generic, but they are explicitly vendored to prevent
+# other drivers from using them, as their semantics are defined in terms of the
+# Midgard/Bifrost hardware tile buffer and may not line up with anything sane.
+# One notable divergence is sRGB, which is asymmetric: raw_input_pan requires
+# an sRGB->linear conversion, but linear values should be written to
+# raw_output_pan and the hardware handles linear->sRGB.
+#
+# We also have format-specific Midgard intrinsics. These are rather
+# here-be-dragons. load_output_u8_as_fp16_pan does the equivalent of
+# load_raw_output_pan on an RGBA8 UNORM framebuffer followed by u2u16 -> fp16 ->
+# division by 255.
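+#
+# Roughly, as a sketch in terms of NIR ALU ops (an assumption, not exact
+# hardware behaviour):
+#
+#   fp16_result = u2f16(u2u16(raw_u8)) / 255.0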
+
+# src[] = { value }
+store("raw_output_pan", 1, [])
+load("raw_output_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER])
+load("output_u8_as_fp16_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER])
+
+# Loads the sampler parameters <min_lod, max_lod, lod_bias>
+# src[] = { sampler_index }
+load("sampler_lod_parameters_pan", 1, [CAN_ELIMINATE, CAN_REORDER])
+
+# V3D-specific intrinsic for tile buffer color reads.
+#
+# The hardware requires that we read the samples and components of a pixel
+# in order, so we cannot eliminate or reorder any loads in a sequence.
+#
+# src[] = { render_target }
+# BASE = sample index
+load("tlb_color_v3d", 1, [BASE, COMPONENT], [])
+
+# V3D-specific intrinsic for per-sample tile buffer color writes.
+#
+# The driver backend needs to identify per-sample color writes and emit
+# specific code for them.
+#
+# src[] = { value, render_target }
+# BASE = sample index
+store("tlb_sample_color_v3d", 2, [BASE, COMPONENT, TYPE], [])