diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index 0ae33a17fe0..33012d4bb01 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -111,6 +111,8 @@ IMAGE_DIM = "NIR_INTRINSIC_IMAGE_DIM"
IMAGE_ARRAY = "NIR_INTRINSIC_IMAGE_ARRAY"
# Access qualifiers for image and memory access intrinsics
ACCESS = "NIR_INTRINSIC_ACCESS"
+DST_ACCESS = "NIR_INTRINSIC_DST_ACCESS"
+SRC_ACCESS = "NIR_INTRINSIC_SRC_ACCESS"
# Image format for image intrinsics
FORMAT = "NIR_INTRINSIC_FORMAT"
# Offset or address alignment
@@ -120,6 +122,16 @@ ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"
# The nir_alu_type of a uniform/input/output
TYPE = "NIR_INTRINSIC_TYPE"
+# The swizzle mask for quad_swizzle_amd & masked_swizzle_amd
+SWIZZLE_MASK = "NIR_INTRINSIC_SWIZZLE_MASK"
+# Driver location of attribute
+DRIVER_LOCATION = "NIR_INTRINSIC_DRIVER_LOCATION"
+# Ordering and visibility of a memory operation
+MEMORY_SEMANTICS = "NIR_INTRINSIC_MEMORY_SEMANTICS"
+# Modes affected by a memory operation
+MEMORY_MODES = "NIR_INTRINSIC_MEMORY_MODES"
+# Scope of a memory operation
+MEMORY_SCOPE = "NIR_INTRINSIC_MEMORY_SCOPE"

#
# Possible flags:
@@ -154,13 +166,13 @@ intrinsic("load_param", dest_comp=0, indices=[PARAM_IDX], flags=[CAN_ELIMINATE])
intrinsic("load_deref", dest_comp=0, src_comp=[-1],
          indices=[ACCESS], flags=[CAN_ELIMINATE])
intrinsic("store_deref", src_comp=[-1, 0], indices=[WRMASK, ACCESS])
-intrinsic("copy_deref", src_comp=[-1, -1])
+intrinsic("copy_deref", src_comp=[-1, -1], indices=[DST_ACCESS, SRC_ACCESS])

# Interpolation of input. The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
-# input differently. The at_sample and at_offset intrinsics take an
-# additional source that is an integer sample id or a vec2 position offset
-# respectively.
+# input differently. The at_sample, at_offset and at_vertex intrinsics take an
+# additional source that is an integer sample id, a vec2 position offset, or a
+# vertex ID respectively.

intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1],
          flags=[ CAN_ELIMINATE, CAN_REORDER])
@@ -168,6 +180,8 @@ intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
+intrinsic("interp_deref_at_vertex", src_comp=[1, 1], dest_comp=0,
+          flags=[CAN_ELIMINATE, CAN_REORDER])

# Gets the length of an unsized array at the end of a buffer
intrinsic("deref_buffer_array_length", src_comp=[-1], dest_comp=1,
@@ -183,13 +197,34 @@ intrinsic("get_buffer_size", src_comp=[-1], dest_comp=1,
def barrier(name):
    intrinsic(name)

-barrier("barrier")
barrier("discard")
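The barrier() helper just above funnels into intrinsic(), like every other definition in this file: each call records an opcode description into a table that the build later turns into C. A simplified, self-contained sketch of that registration pattern (the class and field names here are illustrative stand-ins, not mesa's exact code):

INTR_OPCODES = {}

class Intrinsic(object):
    # One record per opcode: per-source component counts, destination
    # component count, const indices, and flags such as CAN_ELIMINATE.
    def __init__(self, name, src_comp, dest_comp, indices, flags):
        self.name = name
        self.src_comp = src_comp
        self.dest_comp = dest_comp
        self.indices = indices
        self.flags = flags

def intrinsic(name, src_comp=[], dest_comp=-1, indices=[], flags=[]):
    assert name not in INTR_OPCODES   # opcode names must be unique
    INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp, indices, flags)

def barrier(name):
    intrinsic(name)

barrier("discard")
assert INTR_OPCODES["discard"].src_comp == []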
+# Demote fragment shader invocation to a helper invocation. Any stores to
+# memory after this instruction are suppressed and the fragment does not write
+# outputs to the framebuffer. Unlike discard, demote needs to ensure that
+# derivatives will still work for invocations that were not demoted.
+#
+# As specified by SPV_EXT_demote_to_helper_invocation.
+barrier("demote")
+intrinsic("is_helper_invocation", dest_comp=1, flags=[CAN_ELIMINATE])
+
+# A workgroup-level control barrier. Any thread which hits this barrier will
+# pause until all threads within the current workgroup have also hit the
+# barrier. For compute shaders, the workgroup is defined as the local group.
+# For tessellation control shaders, the workgroup is defined as the current
+# patch. This intrinsic does not imply any sort of memory barrier.
+barrier("control_barrier")
+
# Memory barrier with semantics analogous to the memoryBarrier() GLSL
# intrinsic.
barrier("memory_barrier")

+# Memory barrier with explicit scope. Follows the semantics of SPIR-V
+# OpMemoryBarrier, used to implement the Vulkan Memory Model. The storage that
+# the barrier applies to is represented using NIR variable modes.
+intrinsic("scoped_memory_barrier",
+          indices=[MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])
+
# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.
# The latter can be used as a code motion barrier, which is currently not
@@ -227,8 +262,12 @@ barrier("memory_barrier_shared")
barrier("begin_invocation_interlock")
barrier("end_invocation_interlock")

-# A conditional discard, with a single boolean source.
+# Memory barrier for synchronizing TCS patch outputs
+barrier("memory_barrier_tcs_patch")
+
+# A conditional discard/demote, with a single boolean source.
intrinsic("discard_if", src_comp=[1])
+intrinsic("demote_if", src_comp=[1])

# ARB_shader_group_vote intrinsics
intrinsic("vote_any", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
@@ -267,6 +306,14 @@ intrinsic("inclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
intrinsic("exclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
          flags=[CAN_ELIMINATE])

+# AMD shader ballot operations
+intrinsic("quad_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
+          flags=[CAN_ELIMINATE])
+intrinsic("masked_swizzle_amd", src_comp=[0], dest_comp=0, indices=[SWIZZLE_MASK],
+          flags=[CAN_ELIMINATE])
+intrinsic("write_invocation_amd", src_comp=[0, 0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
+intrinsic("mbcnt_amd", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
+
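The reduce/inclusive_scan/exclusive_scan intrinsics above apply REDUCTION_OP across the invocations of a subgroup. A minimal Python sketch of the expected semantics (illustrative only; list elements stand in for per-invocation source values):

from operator import add

def inclusive_scan(values, op=add):
    # Each invocation sees the combination of all lanes up to and
    # including itself: [a, a+b, a+b+c, ...]
    out, acc = [], None
    for v in values:
        acc = v if acc is None else op(acc, v)
        out.append(acc)
    return out

def exclusive_scan(values, op=add, identity=0):
    # Each invocation sees the combination of all lanes strictly before
    # itself, starting from the operation's identity: [0, a, a+b, ...]
    out, acc = [], identity
    for v in values:
        out.append(acc)
        acc = op(acc, v)
    return out

assert inclusive_scan([1, 2, 3, 4]) == [1, 3, 6, 10]
assert exclusive_scan([1, 2, 3, 4]) == [0, 1, 3, 6]

reduce then corresponds to the last element of the inclusive scan broadcast to every invocation.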
# Basic Geometry Shader intrinsics.
#
# emit_vertex implements GLSL's EmitStreamVertex() built-in. It takes a single
@@ -335,17 +382,20 @@ atomic3("atomic_counter_comp_swap")
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
def image(name, src_comp=[], **kwargs):
-   intrinsic("image_deref_" + name, src_comp=[1] + src_comp, **kwargs)
+   intrinsic("image_deref_" + name, src_comp=[1] + src_comp,
+             indices=[ACCESS], **kwargs)
    intrinsic("image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
    intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)

-image("load", src_comp=[4, 1], dest_comp=0, flags=[CAN_ELIMINATE])
-image("store", src_comp=[4, 1, 0])
+image("load", src_comp=[4, 1, 1], dest_comp=0, flags=[CAN_ELIMINATE])
+image("store", src_comp=[4, 1, 0, 1])
 image("atomic_add", src_comp=[4, 1, 1], dest_comp=1)
-image("atomic_min", src_comp=[4, 1, 1], dest_comp=1)
-image("atomic_max", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_imax", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_umax", src_comp=[4, 1, 1], dest_comp=1)
 image("atomic_and", src_comp=[4, 1, 1], dest_comp=1)
 image("atomic_or", src_comp=[4, 1, 1], dest_comp=1)
 image("atomic_xor", src_comp=[4, 1, 1], dest_comp=1)
@@ -354,6 +404,8 @@ image("atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1)
 image("atomic_fadd", src_comp=[1, 4, 1, 1], dest_comp=1)
 image("size", dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER])
 image("samples", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
+image("atomic_inc_wrap", src_comp=[4, 1, 1], dest_comp=1)
+image("atomic_dec_wrap", src_comp=[4, 1, 1], dest_comp=1)

# Intel-specific query for loading from the brw_image_param struct passed
# into the shader as a uniform. The variable is a deref to the image
@@ -512,6 +564,7 @@ def system_value(name, dest_comp, indices=[], bit_sizes=[32]):
                 bit_sizes=bit_sizes)

system_value("frag_coord", 4)
+system_value("point_coord", 2)
system_value("front_face", 1, bit_sizes=[1, 32])
system_value("vertex_id", 1)
system_value("vertex_id_zero_base", 1)
@@ -532,6 +585,8 @@ system_value("invocation_id", 1)
system_value("tess_coord", 3)
system_value("tess_level_outer", 4)
system_value("tess_level_inner", 2)
+system_value("tess_level_outer_default", 4)
+system_value("tess_level_inner_default", 2)
system_value("patch_vertices_in", 1)
system_value("local_invocation_id", 3)
system_value("local_invocation_index", 1)
@@ -581,12 +636,22 @@ system_value("blend_const_color_rgba", 4)
system_value("blend_const_color_rgba8888_unorm", 1)
system_value("blend_const_color_aaaa8888_unorm", 1)

+# System values for gl_Color, for radeonsi which interpolates these in the
+# shader prolog to handle two-sided color without recompiles and therefore
+# doesn't handle these in the main shader part like normal varyings.
+system_value("color0", 4)
+system_value("color1", 4)
+
+# System value for internal compute shaders in radeonsi.
+system_value("user_data_amd", 4)
+
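The barycentric intrinsics defined in the next section produce the weights that a load_interpolated_input intrinsic consumes. As a rough sketch of the math they set up (illustrative; it assumes the two components are the weights of the first two vertices, with the third weight implied so all three sum to 1):

def interpolate(v0, v1, v2, bary):
    # bary is the 2-component value a load_barycentric_* intrinsic yields;
    # the third weight is implied.
    i, j = bary
    k = 1.0 - i - j
    return i * v0 + j * v1 + k * v2

# Exactly at vertex 0 the interpolated attribute is v0's value.
assert interpolate(5.0, 7.0, 9.0, (1.0, 0.0)) == 5.0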
# Barycentric coordinate intrinsics.
#
# These set up the barycentric coordinates for a particular interpolation.
-# The first three are for the simple cases: pixel, centroid, or per-sample
-# (at gl_SampleID). The next two handle interpolating at a specified
-# sample location, or interpolating with a vec2 offset,
+# The first four are for the simple cases: pixel, centroid, per-sample
+# (at gl_SampleID), or pull model (1/W, 1/I, 1/J) at the pixel center. The
+# next two handle interpolating at a specified sample location, or
+# interpolating with a vec2 offset.
#
# The interp_mode index should be either the INTERP_MODE_SMOOTH or
# INTERP_MODE_NOPERSPECTIVE enum values.
@@ -594,18 +659,19 @@ system_value("blend_const_color_aaaa8888_unorm", 1)
# The vec2 value produced by these intrinsics is intended for use as the
# barycoord source of a load_interpolated_input intrinsic.

-def barycentric(name, src_comp=[]):
-    intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=2,
+def barycentric(name, dst_comp, src_comp=[]):
+    intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=dst_comp,
              indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER])

# no sources.
-barycentric("pixel")
-barycentric("centroid")
-barycentric("sample")
+barycentric("pixel", 2)
+barycentric("centroid", 2)
+barycentric("sample", 2)
+barycentric("model", 3)
# src[] = { sample_id }.
-barycentric("at_sample", [1])
+barycentric("at_sample", 2, [1])
# src[] = { offset.xy }.
-barycentric("at_offset", [2])
+barycentric("at_offset", 2, [2])

# Load sample position:
#
@@ -617,6 +683,19 @@ intrinsic("load_sample_pos_from_id", src_comp=[1], dest_comp=2,
# Loads what I believe is the primitive size, for scaling ij to pixel size:
intrinsic("load_size_ir3", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])

+# Fragment shader input interpolation delta intrinsic.
+#
+# For hardware where fragment shader input interpolation is handled in the
+# shader, the load_fs_input_interp_deltas intrinsic can be used to load the
+# input deltas used for interpolation as follows:
+#
+# vec3 iid = load_fs_input_interp_deltas(varying_slot)
+# vec2 bary = load_barycentric_*(...)
+# float result = iid.x + iid.y * bary.y + iid.z * bary.x
+
+intrinsic("load_fs_input_interp_deltas", src_comp=[1], dest_comp=3,
+          indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
+
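The iid formula in the comment above is just a rearranged barycentric interpolation. A self-contained check of that equivalence (a sketch: the delta packing and the (i, j) reading of the vec2 barycentrics are assumptions for illustration, not a statement of any driver's convention):

def interp_direct(v0, v1, v2, bary):
    i, j = bary
    return i * v0 + j * v1 + (1.0 - i - j) * v2

def interp_deltas(iid, bary):
    # float result = iid.x + iid.y * bary.y + iid.z * bary.x
    return iid[0] + iid[1] * bary[1] + iid[2] * bary[0]

v0, v1, v2 = 5.0, 7.0, 9.0
iid = (v2, v1 - v2, v0 - v2)   # hypothetical delta packing
for bary in [(0.2, 0.3), (1.0, 0.0), (0.0, 0.0)]:
    assert abs(interp_direct(v0, v1, v2, bary) - interp_deltas(iid, bary)) < 1e-9

Folding the third vertex's value into a base plus two deltas is what lets the interpolation run as one flat multiply-add chain in the shader.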
# Load operations pull data from some piece of GPU memory. All load
# operations operate in terms of offsets into some piece of theoretical
# memory. Loads from externally visible memory (UBO and SSBO) simply take a
@@ -647,6 +726,8 @@ load("uniform", 1, [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER])
load("ubo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("input", 1, [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER])
+# src[] = { vertex_id, offset }.
+load("input_vertex", 2, [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { vertex, offset }.
load("per_vertex_input", 2, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { barycoord, offset }.
@@ -718,3 +799,99 @@ intrinsic("ssbo_atomic_or_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1)
+
+# System values for freedreno geometry shaders.
+system_value("vs_primitive_stride_ir3", 1)
+system_value("vs_vertex_stride_ir3", 1)
+system_value("gs_header_ir3", 1)
+system_value("primitive_location_ir3", 1, indices=[DRIVER_LOCATION])
+
+# System values for freedreno tessellation shaders.
+system_value("hs_patch_stride_ir3", 1)
+system_value("tess_factor_base_ir3", 2)
+system_value("tess_param_base_ir3", 2)
+system_value("tcs_header_ir3", 1)
+
+# IR3-specific intrinsics for tessellation control shaders. cond_end_ir3 ends
+# the shader when src0 is false and is used to narrow down the TCS shader to
+# just thread 0 before writing out tessellation levels.
+intrinsic("cond_end_ir3", src_comp=[1])
+# end_patch_ir3 is used just before thread 0 exits the TCS and presumably
+# signals the TE that the patch is complete and can be tessellated.
+intrinsic("end_patch_ir3")
+
+# IR3-specific load/store intrinsics. These access a buffer used to pass data
+# between geometry stages - perhaps it's explicit access to the vertex cache.
+
+# src[] = { value, offset }.
+store("shared_ir3", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
+# src[] = { offset }.
+load("shared_ir3", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
+
+# IR3-specific load/store global intrinsics. They take a 64-bit base address
+# and a 32-bit offset. The hardware will add the base and the offset, which
+# saves us from doing 64-bit math on the base address.
+
+# src[] = { value, address(vec2 of hi+lo uint32_t), offset }.
+# const_index[] = { write_mask, access, align_mul, align_offset }
+intrinsic("store_global_ir3", [0, 2, 1], indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
+# src[] = { address(vec2 of hi+lo uint32_t), offset }.
+# const_index[] = { access, align_mul, align_offset }
+intrinsic("load_global_ir3", [2, 1], dest_comp=0, indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
+
+# Intrinsics used by the Midgard/Bifrost blend pipeline. These are defined
+# within a blend shader to read/write the raw value from the tile buffer,
+# without applying any format conversion in the process. If the shader needs
+# usable pixel values, it must apply format conversions itself.
+#
+# These definitions are generic, but they are explicitly vendored to prevent
+# other drivers from using them, as their semantics are defined in terms of the
+# Midgard/Bifrost hardware tile buffer and may not line up with anything sane.
+# One notable divergence is sRGB, which is asymmetric: raw_input_pan requires
+# an sRGB->linear conversion, but linear values should be written to
+# raw_output_pan and the hardware handles linear->sRGB.
+#
+# We also have format-specific Midgard intrinsics. These are rather
+# here-be-dragons. load_output_u8_as_fp16_pan does the equivalent of
+# load_raw_output_pan on an RGBA8 UNORM framebuffer followed by u2u16 -> fp16 ->
+# division by 255.
+
+# src[] = { value }
+store("raw_output_pan", 1, [])
+store("zs_output_pan", 1, [COMPONENT])
+load("raw_output_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER])
+load("output_u8_as_fp16_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER])
+
+# Loads the sampler parameters
+# src[] = { sampler_index }
+load("sampler_lod_parameters_pan", 1, [CAN_ELIMINATE, CAN_REORDER])
+
+# R600-specific intrinsics
+#
+# R600 can only fetch 16 byte aligned data from an UBO, and the actual offset
+# is given in vec4 units, so we have to fetch a vec4 and extract the component
+# later.
+# src[] = { buffer_index, offset }.
+load("ubo_r600", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
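As a quick illustration of the addressing scheme the r600 comment above describes, this splits a byte offset into the 16-byte vec4 slot to fetch plus the 32-bit component to extract (a sketch; the actual lowering lives in the r600 driver, not in this file):

def split_ubo_offset_r600(byte_offset):
    # 16-byte-aligned vec4 fetch: which vec4 holds the scalar, and which
    # of its four 32-bit components is the one requested.
    vec4_index = byte_offset // 16
    component = (byte_offset % 16) // 4
    return vec4_index, component

assert split_ubo_offset_r600(28) == (1, 3)   # byte 28 = vec4 1, component w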
+
+# V3D-specific intrinsic for tile buffer color reads.
+#
+# The hardware requires that we read the samples and components of a pixel
+# in order, so we cannot eliminate or remove any loads in a sequence.
+#
+# src[] = { render_target }
+# BASE = sample index
+load("tlb_color_v3d", 1, [BASE, COMPONENT], [])
+
+# V3D-specific intrinsic for per-sample tile buffer color writes.
+#
+# The driver backend needs to identify per-sample color writes and emit
+# specific code for them.
+#
+# src[] = { value, render_target }
+# BASE = sample index
+store("tlb_sample_color_v3d", 2, [BASE, COMPONENT, TYPE], [])
+
+# V3D-specific intrinsic to load the number of layers attached to
+# the target framebuffer.
+intrinsic("load_fb_layers_v3d", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])
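This file never executes at shader-compile time: mesa's build runs it through the companion generators (nir_intrinsics_c.py and nir_intrinsics_h.py), which render the collected opcode table into the C nir_intrinsic_infos array. A toy emitter in the same spirit (the output shape and field names below are invented for illustration; the real templates emit many more fields):

INTR_OPCODES = {
    "discard_if": {"num_srcs": 1, "flags": []},
    "load_front_face": {"num_srcs": 0, "flags": ["NIR_INTRINSIC_CAN_ELIMINATE"]},
}

def emit_c_table(opcodes):
    # Render a schematic C array from the registration table; the real
    # generators do this via mako templates instead of string pasting.
    lines = ["static const nir_intrinsic_info infos[] = {"]
    for name, info in opcodes.items():
        flags = " | ".join(info["flags"]) or "0"
        lines.append('   {.name = "%s", .num_srcs = %d, .flags = %s},'
                     % (name, info["num_srcs"], flags))
    lines.append("};")
    return "\n".join(lines)

print(emit_c_table(INTR_OPCODES))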