nir: Get rid of *_indirect variants of input/output load/store intrinsics
[mesa.git] src/glsl/nir/nir_intrinsics.h
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

/**
 * This header file defines all the available intrinsics in one place. It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, flags)
 *
 * which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both nir.h, to create the nir_intrinsic_op enum (with
 * members of the form nir_intrinsic_(name)), and in nir_intrinsics.c, to
 * create nir_intrinsic_infos, which is a const array of nir_intrinsic_info
 * structures for each intrinsic.
 */

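/*
 * For example, a consumer generates the opcode enum by defining INTRINSIC
 * (and LAST_INTRINSIC) before including this file. A sketch of what nir.h
 * effectively does:
 *
 *    #define INTRINSIC(name, num_srcs, src_components, has_dest,          \
 *                      dest_components, num_variables, num_indices, flags) \
 *       nir_intrinsic_##name,
 *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
 *
 *    typedef enum {
 *    #include "nir_intrinsics.h"
 *    } nir_intrinsic_op;
 */
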
#define ARR(...) { __VA_ARGS__ }


INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 0, 0)
INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)

/*
 * Interpolation of input. The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently. The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset,
 * respectively.
 */

INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
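
/*
 * These correspond to the GLSL 4.00 / ARB_gpu_shader5 built-ins, e.g.:
 *
 *    interpolateAtCentroid(v)   ->  interp_var_at_centroid, variables[0] = v
 *    interpolateAtSample(v, s)  ->  interp_var_at_sample,   src[0] = s
 *    interpolateAtOffset(v, o)  ->  interp_var_at_offset,   src[0] = o
 */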

/*
 * Ask the driver for the size of a given buffer. It takes the buffer index
 * as its source.
 */
INTRINSIC(get_buffer_size, 1, ARR(1), true, 1, 0, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * A barrier is an intrinsic with no inputs/outputs, but which can't be moved
 * around or optimized in general.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(), false, 0, 0, 0, 0)

BARRIER(barrier)
BARRIER(discard)

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/*
 * Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 * GLSL intrinsic. The latter can be used as a code motion barrier, which
 * is currently not feasible with NIR.
 */
INTRINSIC(shader_clock, 0, ARR(), true, 1, 0, 0, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Memory barrier with semantics analogous to the compute shader
 * groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
 * memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
 */
BARRIER(group_memory_barrier)
BARRIER(memory_barrier_atomic_counter)
BARRIER(memory_barrier_buffer)
BARRIER(memory_barrier_image)
BARRIER(memory_barrier_shared)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, 0)
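
/*
 * For example, lowering
 *
 *    if (cond) discard;
 *
 * to a discard_if yields, in rough nir_print-style syntax (ssa_cond being
 * a hypothetical name for the boolean condition):
 *
 *    intrinsic discard_if (ssa_cond) ()
 */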

/**
 * Basic Geometry Shader intrinsics.
 *
 * emit_vertex implements GLSL's EmitStreamVertex() built-in. It takes a single
 * index, which is the stream ID to write to.
 *
 * end_primitive implements GLSL's EndPrimitive() built-in.
 */
INTRINSIC(emit_vertex, 0, ARR(), false, 0, 0, 1, 0)
INTRINSIC(end_primitive, 0, ARR(), false, 0, 0, 1, 0)

/**
 * Geometry Shader intrinsics with a vertex count.
 *
 * Alternatively, drivers may implement these intrinsics, and use
 * nir_lower_gs_intrinsics() to convert from the basic intrinsics.
 *
 * These maintain a count of the number of vertices emitted, as an additional
 * unsigned integer source.
 */
INTRINSIC(emit_vertex_with_counter, 1, ARR(1), false, 0, 0, 1, 0)
INTRINSIC(end_primitive_with_counter, 1, ARR(1), false, 0, 0, 1, 0)
INTRINSIC(set_vertex_count, 1, ARR(1), false, 0, 0, 0, 0)

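/*
 * A sketch of the conversion done by nir_lower_gs_intrinsics(): the pass
 * keeps a running vertex count and predicates each emit on it, so
 *
 *    emit_vertex
 *
 * becomes, roughly,
 *
 *    if (vertex_count < max_vertices) {
 *       emit_vertex_with_counter vertex_count ...
 *       vertex_count += 1
 *    }
 *
 * with a single set_vertex_count at the end of the shader.
 */
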
/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */

#define ATOMIC(name, flags)                                                \
   INTRINSIC(atomic_counter_##name##_var, 0, ARR(), true, 1, 1, 0, flags)  \
   INTRINSIC(atomic_counter_##name, 1, ARR(1), true, 1, 0, 1, flags)

ATOMIC(inc, 0)
ATOMIC(dec, 0)
ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)
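
/*
 * For example, ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE) expands to both the
 * variable-based and the lowered form:
 *
 *    INTRINSIC(atomic_counter_read_var, 0, ARR(), true, 1, 1, 0,
 *              NIR_INTRINSIC_CAN_ELIMINATE)
 *    INTRINSIC(atomic_counter_read, 1, ARR(1), true, 1, 0, 1,
 *              NIR_INTRINSIC_CAN_ELIMINATE)
 */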

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable. Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * their first two sources, determining the location within the image that
 * will be accessed by the intrinsic. Components not applicable to the image
 * target in use are undefined. Image store takes an additional four-component
 * argument with the value to be written, and image atomic operations take
 * either one or two additional scalar arguments with the same meaning as in
 * the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_size, 0, ARR(), true, 4, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(image_samples, 0, ARR(), true, 1, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
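
/*
 * For example, a GLSL imageStore(img, ivec2(x, y), data) on a 2D image could
 * be represented as an image_store with variables[0] pointing at img and
 * sources (a sketch; coordinate components beyond the target's are
 * undefined):
 *
 *    src[0] = (x, y, undef, undef)    coordinate vector
 *    src[1] = 0                       sample index (non-MS image)
 *    src[2] = data                    value to write
 */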

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
INTRINSIC(ssbo_atomic_add, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_imin, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_umin, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_imax, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_umax, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_and, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_or, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_xor, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_exchange, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)
INTRINSIC(ssbo_atomic_comp_swap, 4, ARR(1, 1, 1, 1), true, 1, 0, 0, 0)
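
/*
 * A minimal sketch of building one of these in C, assuming the nir_builder
 * API of this era (nir_ssa_dest_init() without a bit-size parameter) and
 * hypothetical SSA values block, offset and data:
 *
 *    nir_intrinsic_instr *atomic =
 *       nir_intrinsic_instr_create(b->shader, nir_intrinsic_ssbo_atomic_add);
 *    atomic->src[0] = nir_src_for_ssa(block);    SSBO buffer index
 *    atomic->src[1] = nir_src_for_ssa(offset);   byte offset into the SSBO
 *    atomic->src[2] = nir_src_for_ssa(data);     value to add
 *    nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, NULL);
 *    nir_builder_instr_insert(b, &atomic->instr);
 *
 * The original value read from memory is then atomic->dest.ssa. The shared
 * variable atomics below are built the same way, minus the buffer index
 * source.
 */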

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap, which takes 3. These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 0, 0)
INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 0, 0)

#define SYSTEM_VALUE(name, components, num_indices)                       \
   INTRINSIC(load_##name, 0, ARR(), true, components, 0, num_indices,     \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

SYSTEM_VALUE(front_face, 1, 0)
SYSTEM_VALUE(vertex_id, 1, 0)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0)
SYSTEM_VALUE(base_vertex, 1, 0)
SYSTEM_VALUE(instance_id, 1, 0)
SYSTEM_VALUE(sample_id, 1, 0)
SYSTEM_VALUE(sample_pos, 2, 0)
SYSTEM_VALUE(sample_mask_in, 1, 0)
SYSTEM_VALUE(primitive_id, 1, 0)
SYSTEM_VALUE(invocation_id, 1, 0)
SYSTEM_VALUE(tess_coord, 3, 0)
SYSTEM_VALUE(tess_level_outer, 4, 0)
SYSTEM_VALUE(tess_level_inner, 2, 0)
SYSTEM_VALUE(patch_vertices_in, 1, 0)
SYSTEM_VALUE(local_invocation_id, 3, 0)
SYSTEM_VALUE(work_group_id, 3, 0)
SYSTEM_VALUE(user_clip_plane, 4, 1) /* const_index[0] is user_clip_plane[idx] */
SYSTEM_VALUE(num_work_groups, 3, 0)
SYSTEM_VALUE(helper_invocation, 1, 0)
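
/*
 * As an illustration of the vertex ID variants above: on hardware whose
 * vertex ID does not include the base vertex, a driver can lower
 * load_vertex_id to an integer add of two other system values:
 *
 *    load_vertex_id = load_vertex_id_zero_base + load_base_vertex
 */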

/*
 * Load operations pull data from some piece of GPU memory. All load
 * operations operate in terms of offsets into some piece of theoretical
 * memory. Loads from externally visible memory (UBO and SSBO) simply take a
 * byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
 * take a base+offset pair where the base (const_index[0]) gives the location
 * of the start of the variable being loaded and the offset source is an
 * offset into that variable.
 *
 * Some load operations such as UBO/SSBO load and per_vertex loads take an
 * additional source to specify which UBO/SSBO/vertex to load from.
 *
 * The exact address type depends on the lowering pass that generates the
 * load/store intrinsics. Typically, this is vec4 units for things such as
 * varying slots and float units for fragment shader inputs. UBO and SSBO
 * offsets are always in bytes.
 */

#define LOAD(name, srcs, indices, flags) \
   INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, indices, flags)

/* src[] = { offset }. const_index[] = { base } */
LOAD(uniform, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { offset }. const_index[] = { base } */
LOAD(input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { vertex, offset }. const_index[] = { base } */
LOAD(per_vertex_input, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { vertex, offset }. const_index[] = { base } */
LOAD(per_vertex_output, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
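
/*
 * For example, given "uniform float u[4];" assigned base location b, a
 * dynamically indexed read u[i] might be lowered (by a pass such as
 * nir_lower_io; the exact offset units depend on that pass) to:
 *
 *    load_uniform    src[0] = i              const_index[0] = b
 *
 * while a UBO read supplies the buffer index as an extra source instead of
 * a base:
 *
 *    load_ubo        src[0] = buffer_index   src[1] = byte_offset
 */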

/*
 * Stores work the same way as loads, except now the first source is the
 * value to store and the second (and possibly third) source specifies where
 * to store the value. SSBO and shared memory stores also have a write mask
 * as one of their const_index values (const_index[0] for SSBO,
 * const_index[1] for shared memory).
 */

#define STORE(name, srcs, indices, flags) \
   INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, indices, flags)

/* src[] = { value, offset }. const_index[] = { base } */
STORE(output, 2, 1, 0)
/* src[] = { value, vertex, offset }. const_index[] = { base } */
STORE(per_vertex_output, 3, 1, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, 0)
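
/*
 * Write masks select which components of the value are written: bit i
 * enables component i. For example, storing only the x and z components of
 * a vec4 to an SSBO could look like (a sketch):
 *
 *    store_ssbo    src[0] = value   src[1] = block_index   src[2] = offset
 *                  const_index[0] = 0x5   (binary 0101: components x and z)
 */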

LAST_INTRINSIC(store_shared)