# Copyright (C) 2018 Red Hat
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

# This file defines all the available intrinsics in one place.
#
# The Intrinsic class corresponds one-to-one with nir_intrinsic_info.

class Intrinsic(object):
    """Class that represents all the information about an intrinsic opcode.
    NOTE: this must be kept in sync with nir_intrinsic_info.
    """
    def __init__(self, name, src_components, dest_components,
                 indices, flags, sysval, bit_sizes):
        """Parameters:

        - name: the intrinsic name
        - src_components: list of the number of components per src, 0 means
          vectorized instruction with number of components given in the
          num_components field in nir_intrinsic_instr.
        - dest_components: number of destination components, -1 means no
          dest, 0 means number of components given in num_components field
          in nir_intrinsic_instr.
        - indices: list of constant indices
        - flags: list of semantic flags
        - sysval: is this a system-value intrinsic
        - bit_sizes: allowed dest bit_sizes
        """
        assert isinstance(name, str)
        assert isinstance(src_components, list)
        if src_components:
            assert isinstance(src_components[0], int)
        assert isinstance(dest_components, int)
        assert isinstance(indices, list)
        if indices:
            assert isinstance(indices[0], str)
        assert isinstance(flags, list)
        if flags:
            assert isinstance(flags[0], str)
        assert isinstance(sysval, bool)
        if bit_sizes:
            assert isinstance(bit_sizes[0], int)

        self.name = name
        self.num_srcs = len(src_components)
        self.src_components = src_components
        self.has_dest = (dest_components >= 0)
        self.dest_components = dest_components
        self.num_indices = len(indices)
        self.indices = indices
        self.flags = flags
        self.sysval = sysval
        self.bit_sizes = bit_sizes

# A constant 'base' value that is added to an offset src:
BASE = "NIR_INTRINSIC_BASE"
# For store instructions, a writemask:
WRMASK = "NIR_INTRINSIC_WRMASK"
# The stream-id for GS emit_vertex/end_primitive intrinsics:
STREAM_ID = "NIR_INTRINSIC_STREAM_ID"
# The clip-plane id for load_user_clip_plane intrinsics:
UCP_ID = "NIR_INTRINSIC_UCP_ID"
# The amount of data, starting from BASE, that this instruction
# may access.  This is used to provide bounds if the offset is
# not constant.
RANGE = "NIR_INTRINSIC_RANGE"
# The Vulkan descriptor set for vulkan_resource_index intrinsics.
DESC_SET = "NIR_INTRINSIC_DESC_SET"
# The Vulkan descriptor binding for vulkan_resource_index intrinsics.
BINDING = "NIR_INTRINSIC_BINDING"
# Component offset
COMPONENT = "NIR_INTRINSIC_COMPONENT"
# Interpolation mode (only meaningful for FS inputs)
INTERP_MODE = "NIR_INTRINSIC_INTERP_MODE"
# A binary nir_op to use when performing a reduction or scan operation
REDUCTION_OP = "NIR_INTRINSIC_REDUCTION_OP"
# Cluster size for reduction operations
CLUSTER_SIZE = "NIR_INTRINSIC_CLUSTER_SIZE"
# Parameter index for a load_param intrinsic
PARAM_IDX = "NIR_INTRINSIC_PARAM_IDX"
# Image dimensionality for image intrinsics
IMAGE_DIM = "NIR_INTRINSIC_IMAGE_DIM"
# Non-zero if we are accessing an array image
IMAGE_ARRAY = "NIR_INTRINSIC_IMAGE_ARRAY"
# Access qualifiers for image and memory access intrinsics
ACCESS = "NIR_INTRINSIC_ACCESS"
# Image format for image intrinsics
FORMAT = "NIR_INTRINSIC_FORMAT"
# Offset or address alignment
ALIGN_MUL = "NIR_INTRINSIC_ALIGN_MUL"
ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
# The Vulkan descriptor type for vulkan_resource_index
DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"
# The nir_alu_type of a uniform/input/output
TYPE = "NIR_INTRINSIC_TYPE"

# Whether the intrinsic can be safely eliminated if none of its output
# is read.
CAN_ELIMINATE = "NIR_INTRINSIC_CAN_ELIMINATE"
# Whether the intrinsic can be reordered with respect to any other intrinsic,
# i.e. whether the only reordering dependencies of the intrinsic are due to
# the register reads/writes.
CAN_REORDER = "NIR_INTRINSIC_CAN_REORDER"

INTR_OPCODES = {}

# Defines a new NIR intrinsic.  By default, the intrinsic will have no sources
# and no destination.
#
# You can set dest_comp=n to enable a destination for the intrinsic, in which
# case it will have that many components, or =0 for "as many components as the
# NIR destination value."
#
# Set src_comp=n to enable sources for the instruction.  It can be an array of
# component counts, or (for convenience) a scalar component count if there's
# only one source.  If a component count is 0, it will be as many components as
# the intrinsic has based on the dest_comp.
def intrinsic(name, src_comp=[], dest_comp=-1, indices=[],
              flags=[], sysval=False, bit_sizes=[]):
    assert name not in INTR_OPCODES
    INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp,
                                   indices, flags, sysval, bit_sizes)
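
# As a hedged illustration of the helper above (the opcode name here is
# hypothetical, not a real NIR intrinsic), a two-source operation with a
# vectorized destination, one constant index and the eliminate flag could be
# declared as:
#
#    intrinsic("example_op", src_comp=[1, 0], dest_comp=0,
#              indices=[BASE], flags=[CAN_ELIMINATE])
#
# which would record an Intrinsic with num_srcs=2, has_dest=True and
# num_indices=1 in INTR_OPCODES.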
150 intrinsic("nop", flags
=[CAN_ELIMINATE
])
152 intrinsic("load_param", dest_comp
=0, indices
=[PARAM_IDX
], flags
=[CAN_ELIMINATE
])
154 intrinsic("load_deref", dest_comp
=0, src_comp
=[-1],
155 indices
=[ACCESS
], flags
=[CAN_ELIMINATE
])
156 intrinsic("store_deref", src_comp
=[-1, 0], indices
=[WRMASK
, ACCESS
])
157 intrinsic("copy_deref", src_comp
=[-1, -1])

# Interpolation of input.  The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
# input differently.  The at_sample and at_offset intrinsics take an
# additional source that is an integer sample id or a vec2 position offset
# respectively.

intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Gets the length of an unsized array at the end of a buffer
intrinsic("deref_buffer_array_length", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Ask the driver for the size of a given buffer.  It takes the buffer index
# as source.
intrinsic("get_buffer_size", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# a barrier is an intrinsic with no inputs/outputs but which can't be moved
# around/optimized in general
def barrier(name):
    intrinsic(name)

# Memory barrier with semantics analogous to the memoryBarrier() GLSL
# intrinsic.
barrier("memory_barrier")

# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.
# The latter can be used as code motion barrier, which is currently not
# feasible with NIR.
intrinsic("shader_clock", dest_comp=2, flags=[CAN_ELIMINATE])

# Shader ballot intrinsics with semantics analogous to the
#
#    ballotARB()
#    readInvocationARB()
#    readFirstInvocationARB()
#
# GLSL functions from ARB_shader_ballot.
intrinsic("ballot", src_comp=[1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_invocation", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_first_invocation", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])

# Additional SPIR-V ballot intrinsics
#
# These correspond to the SPIR-V opcodes
#
#    OpGroupUniformElect
#    OpSubgroupFirstInvocationKHR
intrinsic("elect", dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("first_invocation", dest_comp=1, flags=[CAN_ELIMINATE])

# Memory barrier with semantics analogous to the compute shader
# groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
# memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
barrier("group_memory_barrier")
barrier("memory_barrier_atomic_counter")
barrier("memory_barrier_buffer")
barrier("memory_barrier_image")
barrier("memory_barrier_shared")
barrier("begin_invocation_interlock")
barrier("end_invocation_interlock")

# A conditional discard, with a single boolean source.
intrinsic("discard_if", src_comp=[1])

# ARB_shader_group_vote intrinsics
intrinsic("vote_any", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_all", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_feq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_ieq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])

# Ballot ALU operations from SPIR-V.
#
# These operations work like their ALU counterparts except that they operate
# on a uvec4 which is treated as a 128bit integer.  Also, they are, in
# general, free to ignore any bits which are above the subgroup size.
intrinsic("ballot_bitfield_extract", src_comp=[4, 1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_reduce", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_inclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_exclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_lsb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_msb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])

# Shuffle operations from SPIR-V.
intrinsic("shuffle", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_xor", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_up", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_down", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])

# Quad operations from SPIR-V.
intrinsic("quad_broadcast", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_horizontal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_vertical", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_diagonal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
263 intrinsic("reduce", src_comp
=[0], dest_comp
=0, indices
=[REDUCTION_OP
, CLUSTER_SIZE
],
264 flags
=[CAN_ELIMINATE
])
265 intrinsic("inclusive_scan", src_comp
=[0], dest_comp
=0, indices
=[REDUCTION_OP
],
266 flags
=[CAN_ELIMINATE
])
267 intrinsic("exclusive_scan", src_comp
=[0], dest_comp
=0, indices
=[REDUCTION_OP
],
268 flags
=[CAN_ELIMINATE
])

# Basic Geometry Shader intrinsics.
#
# emit_vertex implements GLSL's EmitStreamVertex() built-in.  It takes a single
# index, which is the stream ID to write to.
#
# end_primitive implements GLSL's EndPrimitive() built-in.
intrinsic("emit_vertex", indices=[STREAM_ID])
intrinsic("end_primitive", indices=[STREAM_ID])

# Geometry Shader intrinsics with a vertex count.
#
# Alternatively, drivers may implement these intrinsics, and use
# nir_lower_gs_intrinsics() to convert from the basic intrinsics.
#
# These maintain a count of the number of vertices emitted, as an additional
# unsigned integer source.
intrinsic("emit_vertex_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("end_primitive_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("set_vertex_count", src_comp=[1])

# Atomic counters
#
# The *_deref variants take an atomic_uint nir_variable, while the other,
# lowered, variants take a constant buffer index and register offset.

def atomic(name, flags=[]):
    intrinsic(name + "_deref", src_comp=[-1], dest_comp=1, flags=flags)
    intrinsic(name, src_comp=[1], dest_comp=1, indices=[BASE], flags=flags)

def atomic2(name):
    intrinsic(name + "_deref", src_comp=[-1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1], dest_comp=1, indices=[BASE])

def atomic3(name):
    intrinsic(name + "_deref", src_comp=[-1, 1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
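
# For reference, each helper above emits a deref-based and a lowered variant:
# e.g. atomic2("atomic_counter_add") below produces "atomic_counter_add_deref"
# with src_comp=[-1, 1] and "atomic_counter_add" with src_comp=[1, 1] plus a
# BASE index.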
307 atomic("atomic_counter_inc")
308 atomic("atomic_counter_pre_dec")
309 atomic("atomic_counter_post_dec")
310 atomic("atomic_counter_read", flags
=[CAN_ELIMINATE
])
311 atomic2("atomic_counter_add")
312 atomic2("atomic_counter_min")
313 atomic2("atomic_counter_max")
314 atomic2("atomic_counter_and")
315 atomic2("atomic_counter_or")
316 atomic2("atomic_counter_xor")
317 atomic2("atomic_counter_exchange")
318 atomic3("atomic_counter_comp_swap")

# Image load, store and atomic intrinsics.
#
# All image intrinsics come in three versions.  One which takes an image target
# passed as a deref chain as the first source, one which takes an index as the
# first source, and one which takes a bindless handle as the first source.
# In the first version, the image variable contains the memory and layout
# qualifiers that influence the semantics of the intrinsic.  In the second and
# third, the image format and access qualifiers are provided as constant
# indices.
#
# All image intrinsics take a four-coordinate vector and a sample index as
# 2nd and 3rd sources, determining the location within the image that will be
# accessed by the intrinsic.  Components not applicable to the image target
# in use are undefined.  Image store takes an additional four-component
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
def image(name, src_comp=[], **kwargs):
    intrinsic("image_deref_" + name, src_comp=[1] + src_comp, **kwargs)
    intrinsic("image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
    intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
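
# For example, image("load", ...) below generates three opcodes:
# "image_deref_load" (deref as the first source), "image_load" (index as the
# first source, with IMAGE_DIM/IMAGE_ARRAY/FORMAT/ACCESS indices) and
# "bindless_image_load" (bindless handle as the first source, same indices).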
344 image("load", src_comp
=[4, 1], dest_comp
=0, flags
=[CAN_ELIMINATE
])
345 image("store", src_comp
=[4, 1, 0])
346 image("atomic_add", src_comp
=[4, 1, 1], dest_comp
=1)
347 image("atomic_min", src_comp
=[4, 1, 1], dest_comp
=1)
348 image("atomic_max", src_comp
=[4, 1, 1], dest_comp
=1)
349 image("atomic_and", src_comp
=[4, 1, 1], dest_comp
=1)
350 image("atomic_or", src_comp
=[4, 1, 1], dest_comp
=1)
351 image("atomic_xor", src_comp
=[4, 1, 1], dest_comp
=1)
352 image("atomic_exchange", src_comp
=[4, 1, 1], dest_comp
=1)
353 image("atomic_comp_swap", src_comp
=[4, 1, 1, 1], dest_comp
=1)
354 image("atomic_fadd", src_comp
=[1, 4, 1, 1], dest_comp
=1)
355 image("size", dest_comp
=0, flags
=[CAN_ELIMINATE
, CAN_REORDER
])
356 image("samples", dest_comp
=1, flags
=[CAN_ELIMINATE
, CAN_REORDER
])

# Intel-specific query for loading from the brw_image_param struct passed
# into the shader as a uniform.  The variable is a deref to the image
# variable.  The const index specifies which of the six parameters to load.
intrinsic("image_deref_load_param_intel", src_comp=[1], dest_comp=0,
          indices=[BASE], flags=[CAN_ELIMINATE, CAN_REORDER])
image("load_raw_intel", src_comp=[1], dest_comp=0,
      flags=[CAN_ELIMINATE])
image("store_raw_intel", src_comp=[1, 0])

# Vulkan descriptor set intrinsics
#
# The Vulkan API uses a different binding model from GL.  In the Vulkan
# API, all external resources are represented by a tuple:
#
# (descriptor set, binding, array index)
#
# where the array index is the only thing allowed to be indirect.  The
# vulkan_resource_index intrinsic takes the descriptor set and binding as
# its first two indices and the array index as its source.  The third
# index is the Vulkan descriptor type in case that's useful to the backend.
#
# The intended usage is that the shader will call vulkan_resource_index to
# get an index and then pass that as the buffer index to ubo/ssbo calls.
#
# The vulkan_resource_reindex intrinsic takes a resource index in src0
# (the result of a vulkan_resource_index or vulkan_resource_reindex) which
# corresponds to the tuple (set, binding, index) and computes an index
# corresponding to the tuple (set, binding, idx + src1).
intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=0,
          indices=[DESC_SET, BINDING, DESC_TYPE],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("vulkan_resource_reindex", src_comp=[0, 1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("load_vulkan_descriptor", src_comp=[-1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
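
# As a rough usage sketch (not emitted by this file), a descriptor access for
# set=S, binding=B with a dynamic array index i could be chained as:
#
#    idx  = vulkan_resource_index(i)          # DESC_SET=S, BINDING=B
#    idx2 = vulkan_resource_reindex(idx, j)   # now refers to (S, B, i + j)
#    desc = load_vulkan_descriptor(idx2)
#
# and the result is then used as the buffer index for UBO/SSBO access.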

# variable atomic intrinsics
#
# All of these variable atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new value
# to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3.  These sources
# represent:
#
# 0: A deref to the memory on which to perform the atomic
# 1: The data parameter to the atomic function (i.e. the value to add
#    in deref_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("deref_atomic_add", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_and", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_or", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_xor", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_exchange", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_comp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fadd", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])

# SSBO atomic intrinsics
#
# All of the SSBO atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new
# value to memory, and return the original value read.
#
# All operations take 3 sources except CompSwap that takes 4.  These
# sources represent:
#
# 0: The SSBO buffer index.
# 1: The offset into the SSBO buffer of the variable that the atomic
#    operation will operate on.
# 2: The data parameter to the atomic function (i.e. the value to add
#    in ssbo_atomic_add, etc).
# 3: For CompSwap only: the second data parameter.
intrinsic("ssbo_atomic_add", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_and", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_or", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_xor", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_exchange", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_comp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fadd", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fcomp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])

# CS shared variable atomic intrinsics
#
# All of the shared variable atomic memory operations read a value from
# memory, compute a new value using one of the operations below, write the
# new value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3.  These
# sources represent:
#
# 0: The offset into the shared variable storage region that the atomic
#    operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("shared_atomic_add", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_and", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_or", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_xor", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fadd", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

# Global atomic intrinsics
#
# All of the global atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new
# value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3.  These
# sources represent:
#
# 0: The memory address that the atomic operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in global_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("global_atomic_add", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_and", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_or", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_xor", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fadd", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

def system_value(name, dest_comp, indices=[], bit_sizes=[32]):
    intrinsic("load_" + name, [], dest_comp, indices,
              flags=[CAN_ELIMINATE, CAN_REORDER], sysval=True,
              bit_sizes=bit_sizes)
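
# Each call below defines a "load_<name>" system-value intrinsic; e.g.
# system_value("frag_coord", 4) defines "load_frag_coord" with a 4-component
# destination and the CAN_ELIMINATE/CAN_REORDER flags.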
514 system_value("frag_coord", 4)
515 system_value("front_face", 1, bit_sizes
=[1, 32])
516 system_value("vertex_id", 1)
517 system_value("vertex_id_zero_base", 1)
518 system_value("first_vertex", 1)
519 system_value("is_indexed_draw", 1)
520 system_value("base_vertex", 1)
521 system_value("instance_id", 1)
522 system_value("base_instance", 1)
523 system_value("draw_id", 1)
524 system_value("sample_id", 1)
525 # sample_id_no_per_sample is like sample_id but does not imply per-
526 # sample shading. See the lower_helper_invocation option.
527 system_value("sample_id_no_per_sample", 1)
528 system_value("sample_pos", 2)
529 system_value("sample_mask_in", 1)
530 system_value("primitive_id", 1)
531 system_value("invocation_id", 1)
532 system_value("tess_coord", 3)
533 system_value("tess_level_outer", 4)
534 system_value("tess_level_inner", 2)
535 system_value("patch_vertices_in", 1)
536 system_value("local_invocation_id", 3)
537 system_value("local_invocation_index", 1)
538 system_value("work_group_id", 3)
539 system_value("user_clip_plane", 4, indices
=[UCP_ID
])
540 system_value("num_work_groups", 3)
541 system_value("helper_invocation", 1, bit_sizes
=[1, 32])
542 system_value("alpha_ref_float", 1)
543 system_value("layer_id", 1)
544 system_value("view_index", 1)
545 system_value("subgroup_size", 1)
546 system_value("subgroup_invocation", 1)
547 system_value("subgroup_eq_mask", 0, bit_sizes
=[32, 64])
548 system_value("subgroup_ge_mask", 0, bit_sizes
=[32, 64])
549 system_value("subgroup_gt_mask", 0, bit_sizes
=[32, 64])
550 system_value("subgroup_le_mask", 0, bit_sizes
=[32, 64])
551 system_value("subgroup_lt_mask", 0, bit_sizes
=[32, 64])
552 system_value("num_subgroups", 1)
553 system_value("subgroup_id", 1)
554 system_value("local_group_size", 3)
555 system_value("global_invocation_id", 3, bit_sizes
=[32, 64])
556 system_value("global_invocation_index", 1, bit_sizes
=[32, 64])
557 system_value("work_dim", 1)

# Driver-specific viewport scale/offset parameters.
#
# VC4 and V3D need to emit a scaled version of the position in the vertex
# shaders for binning, and having system values lets us move the math for that
# into NIR.
#
# Panfrost needs to implement all coordinate transformation in the
# vertex shader; system values allow us to share this routine in NIR.
system_value("viewport_x_scale", 1)
system_value("viewport_y_scale", 1)
system_value("viewport_z_scale", 1)
system_value("viewport_z_offset", 1)
system_value("viewport_scale", 3)
system_value("viewport_offset", 3)

# Blend constant color values.  Float values are clamped.  Vectored versions
# are provided as well for driver convenience.
system_value("blend_const_color_r_float", 1)
system_value("blend_const_color_g_float", 1)
system_value("blend_const_color_b_float", 1)
system_value("blend_const_color_a_float", 1)
system_value("blend_const_color_rgba", 4)
system_value("blend_const_color_rgba8888_unorm", 1)
system_value("blend_const_color_aaaa8888_unorm", 1)

# Barycentric coordinate intrinsics.
#
# These set up the barycentric coordinates for a particular interpolation.
# The first three are for the simple cases: pixel, centroid, or per-sample
# (at gl_SampleID).  The next two handle interpolating at a specified
# sample location, or interpolating with a vec2 offset.
#
# The interp_mode index should be either the INTERP_MODE_SMOOTH or
# INTERP_MODE_NOPERSPECTIVE enum values.
#
# The vec2 value produced by these intrinsics is intended for use as the
# barycoord source of a load_interpolated_input intrinsic.

def barycentric(name, src_comp=[]):
    intrinsic("load_barycentric_" + name, src_comp=src_comp, dest_comp=2,
              indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER])
603 barycentric("centroid")
604 barycentric("sample")
605 # src[] = { sample_id }.
606 barycentric("at_sample", [1])
607 # src[] = { offset.xy }.
608 barycentric("at_offset", [2])
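
# Rough sketch of the intended pairing, for illustration only (pseudo-NIR,
# not emitted by this file):
#
#    vec2 bary = load_barycentric_pixel()           # INTERP_MODE index
#    vec4 val  = load_interpolated_input(bary, 0)   # BASE/COMPONENT indices
#
# i.e. the vec2 result feeds the first (barycoord) source of the
# load_interpolated_input intrinsic defined further below.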

# Load sample position:
#
# Takes a sample # and returns a sample position.  Used for lowering
# interpolateAtSample() to interpolateAtOffset()
intrinsic("load_sample_pos_from_id", src_comp=[1], dest_comp=2,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Loads what I believe is the primitive size, for scaling ij to pixel size:
intrinsic("load_size_ir3", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])

# Load operations pull data from some piece of GPU memory.  All load
# operations operate in terms of offsets into some piece of theoretical
# memory.  Loads from externally visible memory (UBO and SSBO) simply take a
# byte offset as a source.  Loads from opaque memory (uniforms, inputs, etc.)
# take a base+offset pair where the nir_intrinsic_base() gives the location
# of the start of the variable being loaded and the offset source is an
# offset into that variable.
#
# Uniform load operations have a nir_intrinsic_range() index that specifies the
# range (starting at base) of the data from which we are loading.  If
# range == 0, then the range is unknown.
#
# Some load operations such as UBO/SSBO load and per_vertex loads take an
# additional source to specify which UBO/SSBO/vertex to load from.
#
# The exact address type depends on the lowering pass that generates the
# load/store intrinsics.  Typically, this is vec4 units for things such as
# varying slots and float units for fragment shader inputs.  UBO and SSBO
# offsets are always in bytes.
def load(name, num_srcs, indices=[], flags=[]):
    intrinsic("load_" + name, [1] * num_srcs, dest_comp=0, indices=indices,
              flags=flags)
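
# E.g. load("ubo", 2, ...) below produces "load_ubo" with src_comp=[1, 1]
# (buffer index and byte offset) and a vectorized destination (dest_comp=0).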

# src[] = { offset }.
load("uniform", 1, [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset }.
load("ubo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("input", 1, [BASE, COMPONENT, TYPE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { vertex, offset }.
load("per_vertex_input", 2, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { barycoord, offset }.
intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
          indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])

# src[] = { buffer_index, offset }.
load("ssbo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { offset }.
load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE])
# src[] = { vertex, offset }.
load("per_vertex_output", 2, [BASE, COMPONENT], [CAN_ELIMINATE])
# src[] = { offset }.
load("shared", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { offset }.
load("push_constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { address }.
load("global", 1, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { address }.
load("kernel_input", 1, [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("scratch", 1, [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])

# Stores work the same way as loads, except now the first source is the value
# to store and the second (and possibly third) source specifies where to store
# the value.  SSBO and shared memory stores also have a
# nir_intrinsic_write_mask().
def store(name, num_srcs, indices=[], flags=[]):
    intrinsic("store_" + name, [0] + ([1] * (num_srcs - 1)), indices=indices,
              flags=flags)
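
# E.g. store("ssbo", 3, ...) below produces "store_ssbo" with
# src_comp=[0, 1, 1]: a vectorized value plus scalar block index and offset
# sources.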

# src[] = { value, offset }.
store("output", 2, [BASE, WRMASK, COMPONENT, TYPE])
# src[] = { value, vertex, offset }.
store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT])
# src[] = { value, block_index, offset }.
store("ssbo", 3, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("shared", 2, [BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, address }.
store("global", 2, [WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("scratch", 2, [ALIGN_MUL, ALIGN_OFFSET, WRMASK])

# IR3-specific versions of most SSBO intrinsics.  The only difference
# compared to the originals is that they add an extra source to hold
# the dword-offset, which is needed by the backend code apart from
# the byte-offset already provided by NIR in one of the sources.
#
# The NIR lowering pass 'ir3_nir_lower_io_offset' will replace the
# original SSBO intrinsics by these, placing the computed
# dword-offset always in the last source.
#
# The float versions are not handled because those are not supported
# by the backend.
intrinsic("store_ssbo_ir3", src_comp=[0, 1, 1, 1],
          indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
intrinsic("load_ssbo_ir3", src_comp=[1, 1, 1], dest_comp=0,
          indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
intrinsic("ssbo_atomic_add_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_imin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_umin_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_imax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_umax_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_and_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_or_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_xor_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_exchange_ir3", src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1)