X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fnir%2Fnir_intrinsics.py;h=da4f95c5ed3d63e13099ef54fa4770095cadf515;hb=1ccd681109e80516430a3be489dca1be15316d50;hp=a648995f7fda471c56c1b0b106798f4e8e69fdd6;hpb=beb6639a9d064b7c4dce389f9ea56fd1373edf33;p=mesa.git diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py index a648995f7fd..da4f95c5ed3 100644 --- a/src/compiler/nir/nir_intrinsics.py +++ b/src/compiler/nir/nir_intrinsics.py @@ -126,6 +126,14 @@ TYPE = "NIR_INTRINSIC_TYPE" SWIZZLE_MASK = "NIR_INTRINSIC_SWIZZLE_MASK" # Driver location of attribute DRIVER_LOCATION = "NIR_INTRINSIC_DRIVER_LOCATION" +# Ordering and visibility of a memory operation +MEMORY_SEMANTICS = "NIR_INTRINSIC_MEMORY_SEMANTICS" +# Modes affected by a memory operation +MEMORY_MODES = "NIR_INTRINSIC_MEMORY_MODES" +# Scope of a memory operation +MEMORY_SCOPE = "NIR_INTRINSIC_MEMORY_SCOPE" +# Scope of a control barrier +EXECUTION_SCOPE = "NIR_INTRINSIC_EXECUTION_SCOPE" # # Possible flags: @@ -164,9 +172,9 @@ intrinsic("copy_deref", src_comp=[-1, -1], indices=[DST_ACCESS, SRC_ACCESS]) # Interpolation of input. The interp_deref_at* intrinsics are similar to the # load_var intrinsic acting on a shader input except that they interpolate the -# input differently. The at_sample and at_offset intrinsics take an -# additional source that is an integer sample id or a vec2 position offset -# respectively. +# input differently. The at_sample, at_offset and at_vertex intrinsics take an +# additional source that is an integer sample id, a vec2 position offset, or a +# vertex ID respectively. 
intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1], flags=[ CAN_ELIMINATE, CAN_REORDER]) @@ -174,6 +182,8 @@ intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER]) intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER]) +intrinsic("interp_deref_at_vertex", src_comp=[1, 1], dest_comp=0, + flags=[CAN_ELIMINATE, CAN_REORDER]) # Gets the length of an unsized array at the end of a buffer intrinsic("deref_buffer_array_length", src_comp=[-1], dest_comp=1, @@ -189,7 +199,6 @@ intrinsic("get_buffer_size", src_comp=[-1], dest_comp=1, def barrier(name): intrinsic(name) -barrier("barrier") barrier("discard") # Demote fragment shader invocation to a helper invocation. Any stores to @@ -201,16 +210,30 @@ barrier("discard") barrier("demote") intrinsic("is_helper_invocation", dest_comp=1, flags=[CAN_ELIMINATE]) +# A workgroup-level control barrier. Any thread which hits this barrier will +# pause until all threads within the current workgroup have also hit the +# barrier. For compute shaders, the workgroup is defined as the local group. +# For tessellation control shaders, the workgroup is defined as the current +# patch. This intrinsic does not imply any sort of memory barrier. +barrier("control_barrier") # Memory barrier with semantics analogous to the memoryBarrier() GLSL # intrinsic. barrier("memory_barrier") +# Control/Memory barrier with explicit scope. Follows the semantics of SPIR-V +# OpMemoryBarrier and OpControlBarrier, used to implement Vulkan Memory Model. +# Storage that the barrier applies is represented using NIR variable modes. +# For an OpMemoryBarrier, set EXECUTION_SCOPE to NIR_SCOPE_NONE. +intrinsic("scoped_barrier", + indices=[EXECUTION_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE]) + # Shader clock intrinsic with semantics analogous to the clock2x32ARB() # GLSL intrinsic. 
# The latter can be used as code motion barrier, which is currently not # feasible with NIR. -intrinsic("shader_clock", dest_comp=2, flags=[CAN_ELIMINATE]) +intrinsic("shader_clock", dest_comp=2, flags=[CAN_ELIMINATE], + indices=[MEMORY_SCOPE]) # Shader ballot intrinsics with semantics analogous to the # @@ -243,6 +266,9 @@ barrier("memory_barrier_shared") barrier("begin_invocation_interlock") barrier("end_invocation_interlock") +# Memory barrier for synchronizing TCS patch outputs +barrier("memory_barrier_tcs_patch") + # A conditional discard/demote, with a single boolean source. intrinsic("discard_if", src_comp=[1]) intrinsic("demote_if", src_comp=[1]) @@ -367,8 +393,8 @@ def image(name, src_comp=[], **kwargs): intrinsic("bindless_image_" + name, src_comp=[1] + src_comp, indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs) -image("load", src_comp=[4, 1], dest_comp=0, flags=[CAN_ELIMINATE]) -image("store", src_comp=[4, 1, 0]) +image("load", src_comp=[4, 1, 1], dest_comp=0, flags=[CAN_ELIMINATE]) +image("store", src_comp=[4, 1, 0, 1]) image("atomic_add", src_comp=[4, 1, 1], dest_comp=1) image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1) image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1) @@ -379,21 +405,12 @@ image("atomic_or", src_comp=[4, 1, 1], dest_comp=1) image("atomic_xor", src_comp=[4, 1, 1], dest_comp=1) image("atomic_exchange", src_comp=[4, 1, 1], dest_comp=1) image("atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1) -image("atomic_fadd", src_comp=[1, 4, 1, 1], dest_comp=1) -image("size", dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER]) +image("atomic_fadd", src_comp=[4, 1, 1], dest_comp=1) +image("size", dest_comp=0, src_comp=[1], flags=[CAN_ELIMINATE, CAN_REORDER]) image("samples", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER]) image("atomic_inc_wrap", src_comp=[4, 1, 1], dest_comp=1) image("atomic_dec_wrap", src_comp=[4, 1, 1], dest_comp=1) -# Intel-specific query for loading from the brw_image_param struct passed -# into the shader as 
a uniform. The variable is a deref to the image -# variable. The const index specifies which of the six parameters to load. -intrinsic("image_deref_load_param_intel", src_comp=[1], dest_comp=0, - indices=[BASE], flags=[CAN_ELIMINATE, CAN_REORDER]) -image("load_raw_intel", src_comp=[1], dest_comp=0, - flags=[CAN_ELIMINATE]) -image("store_raw_intel", src_comp=[1, 0]) - # Vulkan descriptor set intrinsics # # The Vulkan API uses a different binding model from GL. In the Vulkan @@ -464,20 +481,20 @@ intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ # 2: The data parameter to the atomic function (i.e. the value to add # in ssbo_atomic_add, etc). # 3: For CompSwap only: the second data parameter. -intrinsic("ssbo_atomic_add", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_imin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_umin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_imax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_umax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_and", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_or", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_xor", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_exchange", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_comp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_fadd", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_fmin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_fmax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS]) -intrinsic("ssbo_atomic_fcomp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_add", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) 
+intrinsic("ssbo_atomic_imin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_umin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_imax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_umax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_and", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_or", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_xor", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_exchange", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_comp_swap", src_comp=[-1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_fadd", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_fmin", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_fmax", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_fcomp_swap", src_comp=[-1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) # CS shared variable atomic intrinsics # @@ -543,6 +560,7 @@ def system_value(name, dest_comp, indices=[], bit_sizes=[32]): system_value("frag_coord", 4) system_value("point_coord", 2) +system_value("line_coord", 1) system_value("front_face", 1, bit_sizes=[1, 32]) system_value("vertex_id", 1) system_value("vertex_id_zero_base", 1) @@ -588,6 +606,11 @@ system_value("local_group_size", 3) system_value("global_invocation_id", 3, bit_sizes=[32, 64]) system_value("global_invocation_index", 1, bit_sizes=[32, 64]) system_value("work_dim", 1) +system_value("line_width", 1) +system_value("aa_line_width", 1) +# BASE=0 for global/shader, BASE=1 for local/function +system_value("scratch_base_ptr", 0, bit_sizes=[32,64], indices=[BASE]) + # Driver-specific viewport scale/offset parameters. 
# # VC4 and V3D need to emit a scaled version of the position in the vertex @@ -626,9 +649,10 @@ system_value("user_data_amd", 4) # Barycentric coordinate intrinsics. # # These set up the barycentric coordinates for a particular interpolation. -# The first three are for the simple cases: pixel, centroid, or per-sample -# (at gl_SampleID). The next two handle interpolating at a specified -# sample location, or interpolating with a vec2 offset, +# The first four are for the simple cases: pixel, centroid, per-sample +# (at gl_SampleID), or pull model (1/W, 1/I, 1/J) at the pixel center. The next +# two handle interpolating at a specified sample location, or +# interpolating with a vec2 offset, # # The interp_mode index should be either the INTERP_MODE_SMOOTH or # INTERP_MODE_NOPERSPECTIVE enum values. @@ -636,18 +660,19 @@ system_value("user_data_amd", 4) # The vec2 value produced by these intrinsics is intended for use as the # barycoord source of a load_interpolated_input intrinsic. -def barycentric(name, src_comp=[]): - intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=2, +def barycentric(name, dst_comp, src_comp=[]): + intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=dst_comp, indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER]) # no sources. -barycentric("pixel") -barycentric("centroid") -barycentric("sample") +barycentric("pixel", 2) +barycentric("centroid", 2) +barycentric("sample", 2) +barycentric("model", 3) # src[] = { sample_id }. -barycentric("at_sample", [1]) +barycentric("at_sample", 2, [1]) # src[] = { offset.xy }. -barycentric("at_offset", [2]) +barycentric("at_offset", 2, [2]) # Load sample position: # @@ -692,61 +717,65 @@ intrinsic("load_fs_input_interp_deltas", src_comp=[1], dest_comp=3, # varying slots and float units for fragment shader inputs. UBO and SSBO # offsets are always in bytes.
-def load(name, num_srcs, indices=[], flags=[]): - intrinsic("load_" + name, [1] * num_srcs, dest_comp=0, indices=indices, +def load(name, src_comp, indices=[], flags=[]): + intrinsic("load_" + name, src_comp, dest_comp=0, indices=indices, flags=flags) # src[] = { offset }. -load("uniform", 1, [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER]) +load("uniform", [1], [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { buffer_index, offset }. -load("ubo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER]) +load("ubo", [-1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER]) # src[] = { offset }. -load("input", 1, [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER]) +load("input", [1], [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER]) +# src[] = { vertex_id, offset }. +load("input_vertex", [1, 1], [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { vertex, offset }. -load("per_vertex_input", 2, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER]) +load("per_vertex_input", [1, 1], [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { barycoord, offset }. -intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0, - indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER]) +load("interpolated_input", [2, 1], [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { buffer_index, offset }. -load("ssbo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +load("ssbo", [-1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +# src[] = { buffer_index } +load("ssbo_address", [1], [], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { offset }. -load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE]) +load("output", [1], [BASE, COMPONENT], flags=[CAN_ELIMINATE]) # src[] = { vertex, offset }. -load("per_vertex_output", 2, [BASE, COMPONENT], [CAN_ELIMINATE]) +load("per_vertex_output", [1, 1], [BASE, COMPONENT], [CAN_ELIMINATE]) # src[] = { offset }. 
-load("shared", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +load("shared", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) # src[] = { offset }. -load("push_constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER]) +load("push_constant", [1], [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { offset }. -load("constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER]) +load("constant", [1], [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], + [CAN_ELIMINATE, CAN_REORDER]) # src[] = { address }. -load("global", 1, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +load("global", [1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) # src[] = { address }. -load("kernel_input", 1, [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER]) +load("kernel_input", [1], [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER]) # src[] = { offset }. -load("scratch", 1, [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +load("scratch", [1], [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) # Stores work the same way as loads, except now the first source is the value # to store and the second (and possibly third) source specify where to store # the value. SSBO and shared memory stores also have a # nir_intrinsic_write_mask() -def store(name, num_srcs, indices=[], flags=[]): - intrinsic("store_" + name, [0] + ([1] * (num_srcs - 1)), indices=indices, flags=flags) +def store(name, srcs, indices=[], flags=[]): + intrinsic("store_" + name, [0] + srcs, indices=indices, flags=flags) # src[] = { value, offset }. -store("output", 2, [BASE, WRMASK, COMPONENT, TYPE]) +store("output", [1], [BASE, WRMASK, COMPONENT, TYPE]) # src[] = { value, vertex, offset }. 
-store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT]) +store("per_vertex_output", [1, 1], [BASE, WRMASK, COMPONENT]) # src[] = { value, block_index, offset } -store("ssbo", 3, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) +store("ssbo", [-1, 1], [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) # src[] = { value, offset }. -store("shared", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET]) +store("shared", [1], [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET]) # src[] = { value, address }. -store("global", 2, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) +store("global", [1], [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) # src[] = { value, offset }. -store("scratch", 2, [ALIGN_MUL, ALIGN_OFFSET, WRMASK]) +store("scratch", [1], [ALIGN_MUL, ALIGN_OFFSET, WRMASK]) # IR3-specific version of most SSBO intrinsics. The only different # compare to the originals is that they add an extra source to hold @@ -759,20 +788,26 @@ store("scratch", 2, [ALIGN_MUL, ALIGN_OFFSET, WRMASK]) # # The float versions are not handled because those are not supported # by the backend. 
-intrinsic("store_ssbo_ir3", src_comp=[0, 1, 1, 1], - indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) -intrinsic("load_ssbo_ir3", src_comp=[1, 1, 1], dest_comp=0, - indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE]) -intrinsic("ssbo_atomic_add_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_imin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_umin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_imax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_umax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_and_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_or_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1) -intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1) +store("ssbo_ir3", [1, 1, 1], + indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET]) +load("ssbo_ir3", [1, 1, 1], + indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE]) +intrinsic("ssbo_atomic_add_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_imin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_umin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_imax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_umax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_and_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_or_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS]) +intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], 
dest_comp=1, indices=[ACCESS]) + +# IR3-specific instruction for UBO loads using the ldc instruction. The second +# source is the indirect offset, in units of four dwords. The base is a +# component offset, in dword units. +intrinsic("load_ubo_ir3", src_comp=[1, 1], bit_sizes=[32], dest_comp=0, indices=[BASE], + flags=[CAN_REORDER, CAN_ELIMINATE]) # System values for freedreno geometry shaders. system_value("vs_primitive_stride_ir3", 1) @@ -780,13 +815,44 @@ system_value("vs_vertex_stride_ir3", 1) system_value("gs_header_ir3", 1) system_value("primitive_location_ir3", 1, indices=[DRIVER_LOCATION]) +# System values for freedreno tessellation shaders. +system_value("hs_patch_stride_ir3", 1) +system_value("tess_factor_base_ir3", 2) +system_value("tess_param_base_ir3", 2) +system_value("tcs_header_ir3", 1) + +# IR3-specific intrinsics for tessellation control shaders. cond_end_ir3 ends +# the shader when src0 is false and is used to narrow down the TCS shader to +# just thread 0 before writing out tessellation levels. +intrinsic("cond_end_ir3", src_comp=[1]) +# end_patch_ir3 is used just before thread 0 exits the TCS and presumably +# signals the TE that the patch is complete and can be tessellated. +intrinsic("end_patch_ir3") + # IR3-specific load/store intrinsics. These access a buffer used to pass data # between geometry stages - perhaps it's explicit access to the vertex cache. # src[] = { value, offset }. -store("shared_ir3", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET]) +store("shared_ir3", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET]) # src[] = { offset }. -load("shared_ir3", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) +load("shared_ir3", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) + +# IR3-specific load/store global intrinsics. They take a 64-bit base address +# and a 32-bit offset. The hardware will add the base and the offset, which +# saves us from doing 64-bit math on the base address.
+ +# src[] = { value, address(vec2 of hi+lo uint32_t), offset }. +# const_index[] = { write_mask, align_mul, align_offset } +store("global_ir3", [2, 1], indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET]) +# src[] = { address(vec2 of hi+lo uint32_t), offset }. +# const_index[] = { access, align_mul, align_offset } +load("global_ir3", [2, 1], indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE]) + +# IR3-specific bindless handle specifier. Similar to vulkan_resource_index, but +# without the binding because the hardware expects a single flattened index +# rather than a (binding, index) pair. We may also want to use this with GL. +# Note that this doesn't actually turn into a HW instruction. +intrinsic("bindless_resource_ir3", [1], dest_comp=1, indices=[DESC_SET], flags=[CAN_ELIMINATE, CAN_REORDER]) # Intrinsics used by the Midgard/Bifrost blend pipeline. These are defined # within a blend shader to read/write the raw value from the tile buffer, @@ -801,8 +867,33 @@ load("shared_ir3", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE]) # raw_output_pan and the hardware handles linear->sRGB. # src[] = { value } -store("raw_output_pan", 1, []) -load("raw_output_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER]) +store("raw_output_pan", [], []) +store("combined_output_pan", [1, 1, 1], [BASE, COMPONENT]) +load("raw_output_pan", [1], [BASE], [CAN_ELIMINATE, CAN_REORDER]) + +# Loads the sampler parameters +# src[] = { sampler_index } +load("sampler_lod_parameters_pan", [1], [CAN_ELIMINATE, CAN_REORDER]) + +# R600 specific intrinsics +# +# R600 can only fetch 16 byte aligned data from a UBO, and the actual offset +# is given in vec4 units, so we have to fetch a vec4 and get the component +# later +# src[] = { buffer_index, offset }.
+load("ubo_r600", [1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER]) + +# location where the tessellation data is stored in LDS +system_value("tcs_in_param_base_r600", 4) +system_value("tcs_out_param_base_r600", 4) +system_value("tcs_rel_patch_id_r600", 1) +system_value("tcs_tess_factor_base_r600", 1) + +# load as many components as needed giving per-component addresses +intrinsic("load_local_shared_r600", src_comp=[0], dest_comp=0, indices = [COMPONENT], flags = [CAN_ELIMINATE, CAN_REORDER]) + +store("local_shared_r600", [1], [WRMASK]) +store("tf_r600", []) # V3D-specific instrinc for tile buffer color reads. # @@ -811,7 +902,7 @@ load("raw_output_pan", 0, [], [CAN_ELIMINATE, CAN_REORDER]) # # src[] = { render_target } # BASE = sample index -load("tlb_color_v3d", 1, [BASE, COMPONENT], []) +load("tlb_color_v3d", [1], [BASE, COMPONENT], []) # V3D-specific instrinc for per-sample tile buffer color writes. # @@ -820,4 +911,20 @@ load("tlb_color_v3d", 1, [BASE, COMPONENT], []) # # src[] = { value, render_target } # BASE = sample index -store("tlb_sample_color_v3d", 2, [BASE, COMPONENT, TYPE], []) +store("tlb_sample_color_v3d", [1], [BASE, COMPONENT, TYPE], []) + +# V3D-specific intrinsic to load the number of layers attached to +# the target framebuffer +intrinsic("load_fb_layers_v3d", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER]) + +# Intel-specific query for loading from the brw_image_param struct passed +# into the shader as a uniform. The variable is a deref to the image +# variable. The const index specifies which of the six parameters to load. +intrinsic("image_deref_load_param_intel", src_comp=[1], dest_comp=0, + indices=[BASE], flags=[CAN_ELIMINATE, CAN_REORDER]) +image("load_raw_intel", src_comp=[1], dest_comp=0, + flags=[CAN_ELIMINATE]) +image("store_raw_intel", src_comp=[1, 0]) + +# Number of data items being operated on for a SIMD program. +system_value("simd_width_intel", 1)