/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

/**
 * This header file defines all the available intrinsics in one place. It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, idx0, idx1, idx2, flags)
 *
 * Which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both ir.h to create the nir_intrinsic enum (with members
 * of the form nir_intrinsic_(name)) and in opcodes.c to create
 * nir_intrinsic_infos, which is a const array of nir_intrinsic_info
 * structures for each intrinsic.
 */

#define ARR(...) { __VA_ARGS__ }

INTRINSIC(nop, 0, ARR(0), false, 0, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)

INTRINSIC(load_var, 0, ARR(0), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, WRMASK, xx, xx, 0)
INTRINSIC(copy_var, 0, ARR(0), false, 0, 2, 0, xx, xx, xx, 0)
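
/*
 * Illustrative sketch, not part of this header: a pass might build a
 * load_var with nir_builder roughly as follows (assumptions: the usual
 * nir_builder helpers and a 32-bit vector-typed nir_variable *var):
 *
 *    nir_intrinsic_instr *load =
 *       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
 *    load->num_components = glsl_get_vector_elements(var->type);
 *    load->variables[0] = nir_deref_var_create(load, var);
 *    nir_ssa_dest_init(&load->instr, &load->dest,
 *                      load->num_components, 32, NULL);
 *    nir_builder_instr_insert(b, &load->instr);
 */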

/*
 * Interpolation of input.  The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently.  The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */
INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
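
/*
 * Illustrative mapping, assuming a typical GLSL-to-NIR lowering:
 *
 *    interpolateAtCentroid(v)    -> interp_var_at_centroid (var: v)
 *    interpolateAtSample(v, s)   -> interp_var_at_sample   (var: v, src0: s)
 *    interpolateAtOffset(v, off) -> interp_var_at_offset   (var: v, src0: off.xy)
 */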

/*
 * Ask the driver for the size of a given buffer. It takes the buffer index
 * as source.
 */
INTRINSIC(get_buffer_size, 1, ARR(1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * a barrier is an intrinsic with no inputs/outputs but which can't be moved
 * around/optimized in general
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
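
/*
 * For reference, BARRIER(memory_barrier) below expands to:
 *
 *    INTRINSIC(memory_barrier, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
 *
 * i.e. no sources, no destination, no variables, no indices, and neither
 * CAN_ELIMINATE nor CAN_REORDER, which is what keeps it pinned in place.
 */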

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/*
 * Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 * GLSL intrinsic.
 * The latter can be used as a code motion barrier, which is currently not
 * feasible with NIR.
 */
INTRINSIC(shader_clock, 0, ARR(0), true, 2, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Shader ballot intrinsics with semantics analogous to the
 *
 *    ballotARB()
 *    readInvocationARB()
 *    readFirstInvocationARB()
 *
 * GLSL functions from ARB_shader_ballot.
 */
INTRINSIC(ballot, 1, ARR(1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(read_invocation, 2, ARR(0, 1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(read_first_invocation, 1, ARR(0), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
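
/*
 * Note the ARR(0, 1) on read_invocation: a source width of 0 means the
 * source has as many components as the instruction's num_components, so
 * read_invocation takes a variable-width value plus a scalar invocation
 * index.  An illustrative sketch of readInvocationARB(val, i):
 *
 *    vec_n result = read_invocation (src0: val, src1: i)
 */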

/** Additional SPIR-V ballot intrinsics
 *
 * These correspond to the SPIR-V opcodes
 *
 *    OpGroupUniformElect
 *    OpSubgroupFirstInvocationKHR
 */
INTRINSIC(elect, 0, ARR(0), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(first_invocation, 0, ARR(0), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Memory barrier with semantics analogous to the compute shader
 * groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
 * memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
 */
BARRIER(group_memory_barrier)
BARRIER(memory_barrier_atomic_counter)
BARRIER(memory_barrier_buffer)
BARRIER(memory_barrier_image)
BARRIER(memory_barrier_shared)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)

/** ARB_shader_group_vote intrinsics */
INTRINSIC(vote_any, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(vote_all, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(vote_feq, 1, ARR(0), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(vote_ieq, 1, ARR(0), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/** Ballot ALU operations from SPIR-V.
 *
 * These operations work like their ALU counterparts except that they operate
 * on a uvec4 which is treated as a 128-bit integer.  Also, they are, in
 * general, free to ignore any bits which are above the subgroup size.
 */
INTRINSIC(ballot_bitfield_extract, 2, ARR(4, 1), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(ballot_bit_count_reduce, 1, ARR(4), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(ballot_bit_count_inclusive, 1, ARR(4), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(ballot_bit_count_exclusive, 1, ARR(4), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(ballot_find_lsb, 1, ARR(4), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(ballot_find_msb, 1, ARR(4), true, 1, 0,
          0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/**
 * Basic Geometry Shader intrinsics.
 *
 * emit_vertex implements GLSL's EmitStreamVertex() built-in.  It takes a
 * single index, which is the stream ID to write to.
 *
 * end_primitive implements GLSL's EndPrimitive() built-in.
 */
INTRINSIC(emit_vertex,   0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)

/**
 * Geometry Shader intrinsics with a vertex count.
 *
 * Alternatively, drivers may implement these intrinsics, and use
 * nir_lower_gs_intrinsics() to convert from the basic intrinsics.
 *
 * These maintain a count of the number of vertices emitted, as an additional
 * unsigned integer source.  A sketch of the transform follows the
 * definitions below.
 */
INTRINSIC(emit_vertex_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(set_vertex_count, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)
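
/*
 * Rough sketch of the transform nir_lower_gs_intrinsics() performs (the
 * exact details are the pass's business; this only shows the shape).  A
 * vertex counter is maintained, and
 *
 *    emit_vertex (stream_id=0)
 *
 * becomes approximately
 *
 *    emit_vertex_with_counter (src0: count) (stream_id=0)
 *    count = count + 1
 *
 * with set_vertex_count recording the final count.
 */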

/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */
#define ATOMIC(name, flags) \
   INTRINSIC(name##_var, 0, ARR(0), true, 1, 1, 0, xx, xx, xx, flags) \
   INTRINSIC(name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)
#define ATOMIC2(name) \
   INTRINSIC(name##_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0) \
   INTRINSIC(name, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
#define ATOMIC3(name) \
   INTRINSIC(name##_var, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0) \
   INTRINSIC(name, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
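
/*
 * For reference, ATOMIC2(atomic_counter_add) below expands to both forms:
 *
 *    INTRINSIC(atomic_counter_add_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
 *    INTRINSIC(atomic_counter_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
 *
 * i.e. the _var form takes one data source plus an atomic_uint variable,
 * while the lowered form takes two scalar sources plus a BASE index.
 */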
ATOMIC(atomic_counter_inc, 0)
ATOMIC(atomic_counter_dec, 0)
ATOMIC(atomic_counter_read, NIR_INTRINSIC_CAN_ELIMINATE)
ATOMIC2(atomic_counter_add)
ATOMIC2(atomic_counter_min)
ATOMIC2(atomic_counter_max)
ATOMIC2(atomic_counter_and)
ATOMIC2(atomic_counter_or)
ATOMIC2(atomic_counter_xor)
ATOMIC2(atomic_counter_exchange)
ATOMIC3(atomic_counter_comp_swap)

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable.  Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * first two sources, determining the location within the image that will be
 * accessed by the intrinsic.  Components not applicable to the image target
 * in use are undefined.  Image store takes an additional four-component
 * argument with the value to be written, and image atomic operations take
 * either one or two additional scalar arguments with the same meaning as in
 * the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_size, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(image_samples, 0, ARR(0), true, 1, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
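
/*
 * Illustrative sketch, assuming a typical lowering of
 * imageAtomicAdd(img, coord, data) on a non-multisampled 2D image:
 *
 *    result = image_atomic_add (var: img)
 *                              (src0: vec4(coord.x, coord.y, undef, undef),
 *                               src1: 0     <- sample index, unused here
 *                               src2: data)
 *
 * The coordinate components the 2D target does not use are undefined, as
 * described above.
 */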

/*
 * Vulkan descriptor set intrinsics
 *
 * The Vulkan API uses a different binding model from GL.  In the Vulkan
 * API, all external resources are represented by a tuple:
 *
 * (descriptor set, binding, array index)
 *
 * where the array index is the only thing allowed to be indirect.  The
 * vulkan_resource_index intrinsic takes the descriptor set and binding as
 * its first two indices and the array index as its source.
 *
 * The intended usage is that the shader will call vulkan_resource_index to
 * get an index and then pass that as the buffer index to ubo/ssbo calls.
 *
 * The vulkan_resource_reindex intrinsic takes a resource index in src0
 * (the result of a vulkan_resource_index or vulkan_resource_reindex) which
 * corresponds to the tuple (set, binding, index) and computes an index
 * corresponding to the tuple (set, binding, idx + src1).
 */
INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
          DESC_SET, BINDING, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(vulkan_resource_reindex, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
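
/*
 * Illustrative flow, assuming a SPIR-V consumer emits these and a later
 * pass resolves the index to an actual binding-table entry:
 *
 *    idx  = vulkan_resource_index (src0: arr_idx) (desc_set=0, binding=2)
 *    idx2 = vulkan_resource_reindex (src0: idx, src1: 3)
 *    val  = load_ubo (src0: idx2, src1: byte_offset)
 *
 * where idx2 corresponds to the tuple (0, 2, arr_idx + 3).
 */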

/*
 * Variable atomic intrinsics
 *
 * All of these variable atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new value
 * to memory, and return the original value read.
 *
 * All operations take 1 source except CompSwap that takes 2.  These sources
 * represent:
 *
 * 0: The data parameter to the atomic function (i.e. the value to add
 *    in var_atomic_add, etc).
 * 1: For CompSwap only: the second data parameter.
 *
 * All operations take 1 variable deref.
 */
INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap that takes 4.  These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
INTRINSIC(ssbo_atomic_add, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_and, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_or, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_xor, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_exchange, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_comp_swap, 4, ARR(1, 1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
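
/*
 * Illustrative sketch: GLSL atomicAdd(member, 1u) on an SSBO member at
 * byte offset 16 in the block bound at buffer index b becomes
 *
 *    old = ssbo_atomic_add (src0: b, src1: 16, src2: 1)
 *
 * and atomicCompSwap(member, cmp, val) becomes
 *
 *    old = ssbo_atomic_comp_swap (src0: b, src1: 16, src2: cmp, src3: val)
 */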

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap that takes 3.  These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)

/* Used by nir_builder.h to generate loader helpers for the system values. */
#ifndef DEFINE_SYSTEM_VALUE
#define DEFINE_SYSTEM_VALUE(name)
#endif

#define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \
   DEFINE_SYSTEM_VALUE(name) \
   INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
   idx0, idx1, idx2, \
   NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
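
/*
 * For reference, SYSTEM_VALUE(frag_coord, 4, 0, xx, xx, xx) below expands
 * (ignoring the DEFINE_SYSTEM_VALUE hook) to:
 *
 *    INTRINSIC(load_frag_coord, 0, ARR(0), true, 4, 0, 0, xx, xx, xx,
 *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 *
 * i.e. an eliminable, reorderable load with no sources and a vec4
 * destination.
 */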
SYSTEM_VALUE(frag_coord, 4, 0, xx, xx, xx)
SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_vertex, 1, 0, xx, xx, xx)
SYSTEM_VALUE(instance_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_instance, 1, 0, xx, xx, xx)
SYSTEM_VALUE(draw_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_pos, 2, 0, xx, xx, xx)
SYSTEM_VALUE(sample_mask_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(primitive_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(invocation_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(tess_coord, 3, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_outer, 4, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_inner, 2, 0, xx, xx, xx)
SYSTEM_VALUE(patch_vertices_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(work_group_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(user_clip_plane, 4, 1, UCP_ID, xx, xx)
SYSTEM_VALUE(num_work_groups, 3, 0, xx, xx, xx)
SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(alpha_ref_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(layer_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(view_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_size, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_eq_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_ge_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_gt_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_le_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_lt_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(num_subgroups, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_group_size, 3, 0, xx, xx, xx)

/* Blend constant color values.  Float values are clamped. */
SYSTEM_VALUE(blend_const_color_r_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_g_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_b_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_a_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_rgba8888_unorm, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_aaaa8888_unorm, 1, 0, xx, xx, xx)

/*
 * Barycentric coordinate intrinsics.
 *
 * These set up the barycentric coordinates for a particular interpolation.
 * The first three are for the simple cases: pixel, centroid, or per-sample
 * (at gl_SampleID).  The next two handle interpolating at a specified
 * sample location, or interpolating with a vec2 offset.
 *
 * The interp_mode index should be either the INTERP_MODE_SMOOTH or
 * INTERP_MODE_NOPERSPECTIVE enum values.
 *
 * The vec2 value produced by these intrinsics is intended for use as the
 * barycoord source of a load_interpolated_input intrinsic.
 */

#define BARYCENTRIC(name, sources, source_components) \
   INTRINSIC(load_barycentric_##name, sources, ARR(source_components), \
             true, 2, 0, 1, INTERP_MODE, xx, xx, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* no sources.  const_index[] = { interp_mode } */
BARYCENTRIC(pixel, 0, 0)
BARYCENTRIC(centroid, 0, 0)
BARYCENTRIC(sample, 0, 0)
/* src[] = { sample_id }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_sample, 1, 1)
/* src[] = { offset.xy }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_offset, 1, 2)
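
/*
 * Illustrative lowering, assuming a nir_lower_io-style pass handling
 * interpolateAtOffset on a fragment input at driver location `loc`:
 *
 *    bary = load_barycentric_at_offset (src0: off.xy) (interp_mode=INTERP_MODE_SMOOTH)
 *    val  = load_interpolated_input (src0: bary, src1: 0) (base=loc, component=0)
 */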

/**
 * Load operations pull data from some piece of GPU memory.  All load
 * operations operate in terms of offsets into some piece of theoretical
 * memory.  Loads from externally visible memory (UBO and SSBO) simply take a
 * byte offset as a source.  Loads from opaque memory (uniforms, inputs,
 * etc.) take a base+offset pair where the base (const_index[0]) gives the
 * location of the start of the variable being loaded and the offset source
 * is an offset into that variable.
 *
 * Uniform load operations have a second "range" index that specifies the
 * range (starting at base) of the data from which we are loading.  If
 * const_index[1] == 0, then the range is unknown.
 *
 * Some load operations such as UBO/SSBO load and per_vertex loads take an
 * additional source to specify which UBO/SSBO/vertex to load from.
 *
 * The exact address type depends on the lowering pass that generates the
 * load/store intrinsics.  Typically, this is vec4 units for things such as
 * varying slots and float units for fragment shader inputs.  UBO and SSBO
 * offsets are always in bytes.
 */

#define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
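
/*
 * For reference, LOAD(ubo, 2, 0, xx, xx, xx, flags) below expands to:
 *
 *    INTRINSIC(load_ubo, 2, ARR(1, 1, 1, 1), true, 0, 0, 0, xx, xx, xx, flags)
 *
 * ARR(1, 1, 1, 1) only describes as many sources as the intrinsic actually
 * has (two for load_ubo), and a dest_components of 0 means the destination
 * width comes from the instruction's num_components field.
 */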

/* src[] = { offset }. const_index[] = { base, range } */
LOAD(uniform, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(input, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_input, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { barycoord, offset }. const_index[] = { base, component } */
INTRINSIC(load_interpolated_input, 2, ARR(2, 1), true, 0, 0,
          2, BASE, COMPONENT, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(output, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_output, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, range } */
LOAD(push_constant, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/**
 * Stores work the same way as loads, except now the first source is the
 * value to store and the second (and possibly third) source specify where
 * to store the value.  SSBO and shared memory stores also have a write
 * mask as one of their const_index entries.
 */

#define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)

/* src[] = { value, offset }. const_index[] = { base, write_mask, component } */
STORE(output, 2, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, vertex, offset }.
 * const_index[] = { base, write_mask, component }
 */
STORE(per_vertex_output, 3, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, WRMASK, xx, xx, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, BASE, WRMASK, xx, 0)
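
/*
 * Illustrative sketch, not part of this header: storing a vec4 `val` to an
 * SSBO at block index `blk` and byte offset `off` with nir_builder, all
 * four components enabled in the write mask:
 *
 *    nir_intrinsic_instr *store =
 *       nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_ssbo);
 *    store->num_components = 4;
 *    store->src[0] = nir_src_for_ssa(val);
 *    store->src[1] = nir_src_for_ssa(blk);
 *    store->src[2] = nir_src_for_ssa(off);
 *    nir_intrinsic_set_write_mask(store, 0xf);
 *    nir_builder_instr_insert(b, &store->instr);
 */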

LAST_INTRINSIC(store_shared)

#undef DEFINE_SYSTEM_VALUE
#undef INTRINSIC
#undef LAST_INTRINSIC