 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

/**
 * This header file defines all the available intrinsics in one place. It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, idx0, idx1, idx2, flags)
 *
 * These should correspond one-to-one with the nir_intrinsic_info structure. It
 * is included in both ir.h to create the nir_intrinsic enum (with members of
 * the form nir_intrinsic_(name)) and in opcodes.c to create
 * nir_intrinsic_infos, which is a const array of nir_intrinsic_info structures.
 */

#define ARR(...) { __VA_ARGS__ }
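
/*
 * For illustration only: the file that includes this header is expected to
 * define INTRINSIC() and LAST_INTRINSIC() first. A minimal sketch of how the
 * opcode enum could be generated (the exact macro bodies used by the real
 * includers may differ):
 *
 *    typedef enum {
 *    #define INTRINSIC(name, num_srcs, src_components, has_dest,          \
 *                      dest_components, num_variables, num_indices,       \
 *                      idx0, idx1, idx2, flags)                           \
 *       nir_intrinsic_##name,
 *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
 *    #include "nir_intrinsics.h"
 *    } nir_intrinsic_op;
 */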

INTRINSIC(load_var, 0, ARR(0), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, WRMASK, xx, xx, 0)
INTRINSIC(copy_var, 0, ARR(0), false, 0, 2, 0, xx, xx, xx, 0)

/*
 * Interpolation of input. The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently. The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */
INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
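
/*
 * Illustrative mapping (not an exhaustive spec): these correspond to the GLSL
 * interpolateAtCentroid(), interpolateAtSample() and interpolateAtOffset()
 * built-ins, so a front end lowering
 *
 *    vec4 v = interpolateAtOffset(in_color, offset);
 *
 * would typically emit interp_var_at_offset with in_color as the variable and
 * the vec2 offset as src[0].
 */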

/*
 * Ask the driver for the size of a given buffer. It takes the buffer index
 * as source.
 */
INTRINSIC(get_buffer_size, 1, ARR(1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * A barrier is an intrinsic with no inputs/outputs but which can't be moved
 * around/optimized in general.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
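
/*
 * For example, BARRIER(memory_barrier) below simply expands to
 * INTRINSIC(memory_barrier, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0),
 * i.e. an intrinsic with no sources, no destination and no flags, so it can
 * neither be eliminated nor reordered.
 */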

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/*
 * Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 * GLSL intrinsic.
 * The latter can be used as a code motion barrier, which is currently not
 * feasible with NIR.
 */
INTRINSIC(shader_clock, 0, ARR(0), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Memory barrier with semantics analogous to the compute shader
 * groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
 * memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
 */
BARRIER(group_memory_barrier)
BARRIER(memory_barrier_atomic_counter)
BARRIER(memory_barrier_buffer)
BARRIER(memory_barrier_image)
BARRIER(memory_barrier_shared)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)

/*
 * Basic Geometry Shader intrinsics.
 *
 * emit_vertex implements GLSL's EmitStreamVertex() built-in. It takes a single
 * index, which is the stream ID to write to.
 *
 * end_primitive implements GLSL's EndPrimitive() built-in.
 */
INTRINSIC(emit_vertex, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
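
/*
 * Illustrative example (hypothetical GLSL input): a geometry shader calling
 * EmitStreamVertex(1) would be translated to an emit_vertex intrinsic whose
 * single index (STREAM_ID) is the constant 1; plain EmitVertex() uses
 * stream 0.
 */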

/*
 * Geometry Shader intrinsics with a vertex count.
 *
 * Alternatively, drivers may implement these intrinsics, and use
 * nir_lower_gs_intrinsics() to convert from the basic intrinsics.
 *
 * These maintain a count of the number of vertices emitted, as an additional
 * unsigned integer source.
 */
INTRINSIC(emit_vertex_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(set_vertex_count, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)

/*
 * Atomic counter intrinsics.
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */
#define ATOMIC(name, flags) \
   INTRINSIC(atomic_counter_##name##_var, 0, ARR(0), true, 1, 1, 0, xx, xx, xx, flags) \
   INTRINSIC(atomic_counter_##name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)

ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)
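
/*
 * For example, the ATOMIC(read, ...) invocation above defines two intrinsics:
 * atomic_counter_read_var, which takes the counter as a nir_variable, and
 * atomic_counter_read, which takes a register offset source plus a BASE index
 * holding the constant buffer index.
 */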

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable. Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * first two sources, determining the location within the image that will be
 * accessed by the intrinsic. Components not applicable to the image target
 * in use are undefined. Image store takes an additional four-component
 * argument with the value to be written, and image atomic operations take
 * either one or two additional scalar arguments with the same meaning as in
 * the ARB_shader_image_load_store specification.
 */
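
/*
 * Illustrative example (hypothetical source shader): lowering the GLSL call
 * imageAtomicAdd(img, ivec2(x, y), data) on a non-multisampled 2D image would
 * typically produce image_atomic_add with src[0] = (x, y, undef, undef),
 * src[1] = an undefined sample index, and src[2] = data, returning the value
 * that was in memory before the addition.
 */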
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_size, 0, ARR(0), true, 4, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(image_samples, 0, ARR(0), true, 1, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * Vulkan descriptor set intrinsic
 *
 * The Vulkan API uses a different binding model from GL. In the Vulkan
 * API, all external resources are represented by a tuple:
 *
 * (descriptor set, binding, array index)
 *
 * where the array index is the only thing allowed to be indirect. The
 * vulkan_resource_index intrinsic takes the descriptor set and binding as
 * its first two indices and the array index as its source.
 *
 * The intended usage is that the shader will call vulkan_resource_index to
 * get an index and then pass that as the buffer index to UBO/SSBO load and
 * store intrinsics.
 */
INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
          DESC_SET, BINDING, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
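
/*
 * Illustrative lowering sketch (names are made up for the example): a load
 * from an SSBO declared at set=0, binding=2 could be expressed as
 *
 *    index = vulkan_resource_index(array_index)   [DESC_SET=0, BINDING=2]
 *    value = load_ssbo(index, byte_offset)
 *
 * with the driver later resolving the index to an actual surface/descriptor.
 */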

/*
 * Variable atomic intrinsics
 *
 * All of these variable atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new value
 * to memory, and return the original value read.
 *
 * All operations take 1 source except CompSwap, which takes 2. These sources
 * represent:
 *
 * 0: The data parameter to the atomic function (i.e. the value to add
 *    in var_atomic_add, etc).
 * 1: For CompSwap only: the second data parameter.
 *
 * All operations take 1 variable deref.
 */
INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
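
/*
 * Illustrative lowering sketch (hypothetical shader): a GLSL
 * atomicAdd(ssbo_block.counter, 1u) would typically become
 *
 *    old = ssbo_atomic_add(block_index, byte_offset_of_counter, 1u)
 *
 * where the returned value is the contents of the location before the add.
 */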
INTRINSIC(ssbo_atomic_add, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_and, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_or, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_xor, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_exchange, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_comp_swap, 4, ARR(1, 1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap, which takes 3. These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)

#define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \
   INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
   idx0, idx1, idx2, \
   NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
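
/*
 * For example, SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx) below expands to a
 * load_front_face intrinsic with no sources, a single-component destination,
 * no indices, and the CAN_ELIMINATE/CAN_REORDER flags set.
 */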
SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_vertex, 1, 0, xx, xx, xx)
SYSTEM_VALUE(instance_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_instance, 1, 0, xx, xx, xx)
SYSTEM_VALUE(draw_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_pos, 2, 0, xx, xx, xx)
SYSTEM_VALUE(sample_mask_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(primitive_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(invocation_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(tess_coord, 3, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_outer, 4, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_inner, 2, 0, xx, xx, xx)
SYSTEM_VALUE(patch_vertices_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(work_group_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(user_clip_plane, 4, 1, UCP_ID, xx, xx)
SYSTEM_VALUE(num_work_groups, 3, 0, xx, xx, xx)
SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(channel_num, 1, 0, xx, xx, xx)

/*
 * Load operations pull data from some piece of GPU memory. All load
 * operations operate in terms of offsets into some piece of theoretical
 * memory. Loads from externally visible memory (UBO and SSBO) simply take a
 * byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
 * take a base+offset pair where the base (const_index[0]) gives the location
 * of the start of the variable being loaded and the offset source is an
 * offset into that variable.
 *
 * Uniform load operations have a second "range" index that specifies the
 * range (starting at base) of the data from which we are loading. If
 * const_index[1] == 0, then the range is unknown.
 *
 * Some load operations such as UBO/SSBO load and per_vertex loads take an
 * additional source to specify which UBO/SSBO/vertex to load from.
 *
 * The exact address type depends on the lowering pass that generates the
 * load/store intrinsics. Typically, this is vec4 units for things such as
 * varying slots and float units for fragment shader inputs. UBO and SSBO
 * offsets are always in bytes.
 */
#define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
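
/*
 * For example, the LOAD(ubo, ...) invocation below expands to
 * INTRINSIC(load_ubo, 2, ARR(1, 1, 1, 1), true, 0, 0, 0, xx, xx, xx,
 *           NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER),
 * i.e. a reorderable load with two scalar sources (buffer index and offset)
 * and a variable-sized destination.
 */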

/* src[] = { offset }. const_index[] = { base, range } */
LOAD(uniform, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { offset }. const_index[] = { base } */
LOAD(input, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { vertex, offset }. const_index[] = { base } */
LOAD(per_vertex_input, 2, 1, BASE, xx, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(output, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { vertex, offset }. const_index[] = { base } */
LOAD(per_vertex_output, 2, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, range } */
LOAD(push_constant, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * Stores work the same way as loads, except now the first source is the value
 * to store and the second (and possibly third) source specify where to store
 * the value. Each store also takes a write mask index (WRMASK) that specifies
 * which components of the value are written.
 */
#define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)
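
/*
 * Illustrative example (hypothetical values): storing the .xy components of a
 * vec4 to an SSBO would use store_ssbo with src[0] = value, src[1] =
 * block_index, src[2] = byte_offset and a write mask of 0x3 (bit n set means
 * component n is written).
 */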

/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(output, 2, 2, BASE, WRMASK, xx, 0)
/* src[] = { value, vertex, offset }. const_index[] = { base, write_mask } */
STORE(per_vertex_output, 3, 2, BASE, WRMASK, xx, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, WRMASK, xx, xx, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, BASE, WRMASK, xx, 0)

LAST_INTRINSIC(store_shared)