/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

/**
 * This header file defines all the available intrinsics in one place.  It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, idx0, idx1, idx2, flags)
 *
 * which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both nir.h, to create the nir_intrinsic enum (with
 * members of the form nir_intrinsic_(name)), and in nir_intrinsics.c, to
 * create nir_intrinsic_infos, a const array of nir_intrinsic_info
 * structures, one for each intrinsic.
 */
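
/*
 * As an illustration (not part of this header), a consumer can expand the
 * list into the intrinsic enum roughly like this; the exact definitions in
 * nir.h may differ slightly:
 *
 *    #define INTRINSIC(name, num_srcs, src_components, has_dest,          \
 *                      dest_components, num_variables, num_indices,       \
 *                      idx0, idx1, idx2, flags)                           \
 *       nir_intrinsic_##name,
 *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
 *
 *    typedef enum {
 *    #include "nir_intrinsics.h"
 *       nir_num_intrinsics = nir_last_intrinsic + 1
 *    } nir_intrinsic_op;
 */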

#define ARR(...) { __VA_ARGS__ }

INTRINSIC(nop, 0, ARR(0), false, 0, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)

INTRINSIC(load_var, 0, ARR(0), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, WRMASK, xx, xx, 0)
INTRINSIC(copy_var, 0, ARR(0), false, 0, 2, 0, xx, xx, xx, 0)

/*
 * Interpolation of input.  The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently.  The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */

INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
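
/*
 * A rough sketch (under the current nir_builder/intrinsic API; details may
 * vary) of how a pass might emit interp_var_at_offset, given a nir_builder
 * b, a hypothetical input variable "input_var", and a vec2 SSA value
 * "offset":
 *
 *    nir_intrinsic_instr *interp =
 *       nir_intrinsic_instr_create(b.shader,
 *                                  nir_intrinsic_interp_var_at_offset);
 *    interp->num_components = glsl_get_vector_elements(input_var->type);
 *    interp->variables[0] = nir_deref_var_create(interp, input_var);
 *    interp->src[0] = nir_src_for_ssa(offset);
 *    nir_ssa_dest_init(&interp->instr, &interp->dest,
 *                      interp->num_components, 32, NULL);
 *    nir_builder_instr_insert(&b, &interp->instr);
 */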

/*
 * Ask the driver for the size of a given buffer.  It takes the buffer index
 * as its source.
 */
INTRINSIC(get_buffer_size, 1, ARR(1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * A barrier is an intrinsic with no inputs/outputs that can't be moved
 * around or optimized in general.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
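
/*
 * For example, BARRIER(memory_barrier) expands to:
 *
 *    INTRINSIC(memory_barrier, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)
 */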

BARRIER(barrier)
BARRIER(discard)

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/*
 * Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 * GLSL intrinsic.
 * The latter can be used as a code motion barrier, which is currently not
 * feasible with NIR.
 */
INTRINSIC(shader_clock, 0, ARR(0), true, 2, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Memory barrier with semantics analogous to the compute shader
 * groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
 * memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
 */
BARRIER(group_memory_barrier)
BARRIER(memory_barrier_atomic_counter)
BARRIER(memory_barrier_buffer)
BARRIER(memory_barrier_image)
BARRIER(memory_barrier_shared)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)

/**
 * Basic Geometry Shader intrinsics.
 *
 * emit_vertex implements GLSL's EmitStreamVertex() built-in.  It takes a
 * single index, which is the stream ID to write to.
 *
 * end_primitive implements GLSL's EndPrimitive() built-in.
 */
INTRINSIC(emit_vertex, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
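
/*
 * For example, GLSL's EmitStreamVertex(1) becomes an emit_vertex intrinsic
 * with no sources and const_index[0] (the STREAM_ID index) set to 1,
 * printed by nir_print roughly as:
 *
 *    intrinsic emit_vertex () () (1)
 */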

/**
 * Geometry Shader intrinsics with a vertex count.
 *
 * Alternatively, drivers may implement these intrinsics, and use
 * nir_lower_gs_intrinsics() to convert from the basic intrinsics.
 *
 * These maintain a count of the number of vertices emitted, as an additional
 * unsigned integer source.
 */
INTRINSIC(emit_vertex_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(set_vertex_count, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)
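
/*
 * A sketch of what nir_lower_gs_intrinsics() makes of each emit_vertex
 * (simplified; see the pass itself for the real details):
 *
 *    if (count < max_vertices) {
 *       emit_vertex_with_counter(count);  // stream ID stays in const_index[0]
 *       count = count + 1;
 *    }
 *
 * with a single set_vertex_count(count) inserted at the end of the shader.
 */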

/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */

#define ATOMIC(name, flags)                                             \
   INTRINSIC(name##_var, 0, ARR(0), true, 1, 1, 0, xx, xx, xx, flags)   \
   INTRINSIC(name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)
#define ATOMIC2(name)                                                   \
   INTRINSIC(name##_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)       \
   INTRINSIC(name, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
#define ATOMIC3(name)                                                   \
   INTRINSIC(name##_var, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)    \
   INTRINSIC(name, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
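
/*
 * For example, ATOMIC2(atomic_counter_add) expands to:
 *
 *    INTRINSIC(atomic_counter_add_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
 *    INTRINSIC(atomic_counter_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
 *
 * i.e. the pre-lowering variant takes the counter variable plus one data
 * source, while the lowered variant takes an offset source plus one data
 * source, with the buffer index carried in const_index[0] (BASE).
 */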

ATOMIC(atomic_counter_inc, 0)
ATOMIC(atomic_counter_dec, 0)
ATOMIC(atomic_counter_read, NIR_INTRINSIC_CAN_ELIMINATE)
ATOMIC2(atomic_counter_add)
ATOMIC2(atomic_counter_min)
ATOMIC2(atomic_counter_max)
ATOMIC2(atomic_counter_and)
ATOMIC2(atomic_counter_or)
ATOMIC2(atomic_counter_xor)
ATOMIC2(atomic_counter_exchange)
ATOMIC3(atomic_counter_comp_swap)

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable.  Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * their first two sources, determining the location within the image that
 * will be accessed by the intrinsic.  Components not applicable to the image
 * target in use are undefined.  Image store takes an additional
 * four-component argument with the value to be written, and image atomic
 * operations take either one or two additional scalar arguments with the
 * same meaning as in the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_size, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(image_samples, 0, ARR(0), true, 1, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
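
/*
 * For example, a GLSL store to a 2D image:
 *
 *    imageStore(img, coord, data);
 *
 * maps to an image_store whose variables[0] is img, whose src[0] is coord
 * padded out to an ivec4 (unused components undefined), whose src[1] is the
 * sample index (irrelevant for non-MS targets), and whose src[2] is the
 * vec4 data.
 */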

/*
 * Vulkan descriptor set intrinsic
 *
 * The Vulkan API uses a different binding model from GL.  In the Vulkan
 * API, all external resources are represented by a tuple:
 *
 * (descriptor set, binding, array index)
 *
 * where the array index is the only thing allowed to be indirect.  The
 * vulkan_resource_index intrinsic takes the descriptor set and binding as
 * its two indices and the array index as its source.
 *
 * The intended usage is that the shader will call vulkan_resource_index to
 * get an index and then pass that as the buffer index to ubo/ssbo calls.
 */
INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
          DESC_SET, BINDING, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
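
/*
 * A sketch of the expected sequence for a Vulkan UBO declared
 * layout(set = 0, binding = 2), in rough nir_print form (SSA numbers made
 * up):
 *
 *    vec1 32 ssa_1 = intrinsic vulkan_resource_index (ssa_0) () (0, 2)
 *    vec4 32 ssa_3 = intrinsic load_ubo (ssa_1, ssa_2) () ()
 *
 * where ssa_0 is the array index into the binding, the two indices are the
 * descriptor set and binding, and ssa_2 is the byte offset of the load.
 */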

/*
 * Variable atomic intrinsics
 *
 * All of these variable atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new value
 * to memory, and return the original value read.
 *
 * All operations take 1 source except CompSwap, which takes 2.  These
 * sources represent:
 *
 * 0: The data parameter to the atomic function (i.e. the value to add
 *    in var_atomic_add, etc).
 * 1: For CompSwap only: the second data parameter.
 *
 * All operations take 1 variable deref.
 */
INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)
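
/*
 * For example, a GLSL atomicAdd(v, x) on a variable v (e.g. a shared
 * variable before lowering) becomes a var_atomic_add whose variables[0] is
 * a deref of v, whose src[0] is x, and whose destination receives the
 * pre-operation value of v.
 */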

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4.  These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
INTRINSIC(ssbo_atomic_add, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_and, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_or, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_xor, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_exchange, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_comp_swap, 4, ARR(1, 1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
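
/*
 * For example, a GLSL atomicAdd(buf.counter, x) on an SSBO member would
 * become, in rough nir_print form (SSA numbers made up):
 *
 *    vec1 32 ssa_3 = intrinsic ssbo_atomic_add (ssa_0, ssa_1, ssa_2) () ()
 *
 * where ssa_0 is the buffer index, ssa_1 is the byte offset of "counter"
 * within the block, and ssa_2 is x; ssa_3 receives the old value.
 */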

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap, which takes 3.  These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
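
/*
 * Unlike the SSBO forms, the shared variants carry a BASE index.  Analogous
 * to load_shared below, the effective shared-memory address is
 * const_index[0] (BASE) plus the offset source, so e.g. a shared_atomic_add
 * with BASE = 16 and src[0] = offset operates on shared memory at
 * 16 + offset.
 */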

/* Used by nir_builder.h to generate loader helpers for the system values. */
#ifndef DEFINE_SYSTEM_VALUE
#define DEFINE_SYSTEM_VALUE(name)
#endif

#define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \
   DEFINE_SYSTEM_VALUE(name) \
   INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
             idx0, idx1, idx2, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
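
/*
 * nir_builder.h defines DEFINE_SYSTEM_VALUE before including this file to
 * generate one load helper per system value, roughly (the exact definition
 * may differ):
 *
 *    #define DEFINE_SYSTEM_VALUE(name)                                   \
 *       static inline nir_ssa_def *                                      \
 *       nir_load_##name(nir_builder *build)                              \
 *       {                                                                \
 *          return nir_load_system_value(build,                           \
 *                                       nir_intrinsic_load_##name, 0);   \
 *       }
 */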

SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_vertex, 1, 0, xx, xx, xx)
SYSTEM_VALUE(instance_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_instance, 1, 0, xx, xx, xx)
SYSTEM_VALUE(draw_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_pos, 2, 0, xx, xx, xx)
SYSTEM_VALUE(sample_mask_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(primitive_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(invocation_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(tess_coord, 3, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_outer, 4, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_inner, 2, 0, xx, xx, xx)
SYSTEM_VALUE(patch_vertices_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(work_group_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(user_clip_plane, 4, 1, UCP_ID, xx, xx)
SYSTEM_VALUE(num_work_groups, 3, 0, xx, xx, xx)
SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(channel_num, 1, 0, xx, xx, xx)
SYSTEM_VALUE(alpha_ref_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(layer_id, 1, 0, xx, xx, xx)

/* Blend constant color values.  Float values are clamped. */
SYSTEM_VALUE(blend_const_color_r_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_g_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_b_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_a_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_rgba8888_unorm, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_aaaa8888_unorm, 1, 0, xx, xx, xx)

/**
 * Barycentric coordinate intrinsics.
 *
 * These set up the barycentric coordinates for a particular interpolation.
 * The first three are for the simple cases: pixel, centroid, or per-sample
 * (at gl_SampleID).  The next two handle interpolating at a specified
 * sample location, or interpolating with a vec2 offset.
 *
 * The interp_mode index should be one of the INTERP_MODE_SMOOTH or
 * INTERP_MODE_NOPERSPECTIVE enum values.
 *
 * The vec2 value produced by these intrinsics is intended for use as the
 * barycoord source of a load_interpolated_input intrinsic.
 */

#define BARYCENTRIC(name, sources, source_components) \
   INTRINSIC(load_barycentric_##name, sources, ARR(source_components), \
             true, 2, 0, 1, INTERP_MODE, xx, xx, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* no sources.  const_index[] = { interp_mode } */
BARYCENTRIC(pixel, 0, 0)
BARYCENTRIC(centroid, 0, 0)
BARYCENTRIC(sample, 0, 0)
/* src[] = { sample_id }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_sample, 1, 1)
/* src[] = { offset.xy }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_offset, 1, 2)
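
/*
 * For example, GLSL's interpolateAtOffset(color, off) in a fragment shader
 * would lower to a pair like this, in rough nir_print form (SSA and index
 * numbers made up):
 *
 *    vec2 32 ssa_1 = intrinsic load_barycentric_at_offset (ssa_0) () (1)
 *    vec4 32 ssa_3 = intrinsic load_interpolated_input (ssa_1, ssa_2) () (31, 0)
 *
 * where ssa_0 is the vec2 offset, the (1) is the interp mode, ssa_2 is the
 * indirect offset into the input (often constant zero), and (31, 0) are the
 * base and component indices for "color".
 */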

/*
 * Load operations pull data from some piece of GPU memory.  All load
 * operations operate in terms of offsets into some piece of theoretical
 * memory.  Loads from externally visible memory (UBO and SSBO) simply take a
 * byte offset as a source.  Loads from opaque memory (uniforms, inputs,
 * etc.) take a base+offset pair where the base (const_index[0]) gives the
 * location of the start of the variable being loaded and the offset source
 * is an offset into that variable.
 *
 * Uniform load operations have a second "range" index that specifies the
 * range (starting at base) of the data from which we are loading.  If
 * const_index[1] == 0, then the range is unknown.
 *
 * Some load operations such as UBO/SSBO load and per_vertex loads take an
 * additional source to specify which UBO/SSBO/vertex to load from.
 *
 * The exact address type depends on the lowering pass that generates the
 * load/store intrinsics.  Typically, this is vec4 units for things such as
 * varying slots and float units for fragment shader inputs.  UBO and SSBO
 * offsets are always in bytes.
 */

#define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)

/* src[] = { offset }. const_index[] = { base, range } */
LOAD(uniform, 1, 2, BASE, RANGE, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(input, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_input, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { barycoord, offset }. const_index[] = { base, component } */
INTRINSIC(load_interpolated_input, 2, ARR(2, 1), true, 0, 0,
          2, BASE, COMPONENT, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(output, 1, 1, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_output, 2, 1, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, range } */
LOAD(push_constant, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
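
/*
 * For example, loading 16 bytes at byte offset 32 from UBO binding 0 could
 * be printed roughly as:
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (ssa_0, ssa_1) () ()
 *
 * with ssa_0 = the buffer index (0) and ssa_1 = the byte offset (32).  A
 * uniform load of the same data would instead put the variable's location
 * in const_index[0] (BASE) and pass only the offset as a source.
 */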

/*
 * Stores work the same way as loads, except now the first source is the
 * value to store and the second (and possibly third) source specifies where
 * to store the value.  SSBO and shared memory stores also have a write mask
 * as const_index[0].
 */

#define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)

/* src[] = { value, offset }. const_index[] = { base, write_mask, component } */
STORE(output, 2, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, vertex, offset }.
 * const_index[] = { base, write_mask, component }
 */
STORE(per_vertex_output, 3, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, WRMASK, xx, xx, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, BASE, WRMASK, xx, 0)
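
/*
 * For example, writing a vec4 value to all four components of an SSBO
 * location could be printed roughly as:
 *
 *    intrinsic store_ssbo (ssa_0, ssa_1, ssa_2) () (15)
 *
 * where ssa_0 is the value, ssa_1 the block index, ssa_2 the byte offset,
 * and (15) the write mask (0xf, all four components).
 */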

LAST_INTRINSIC(store_shared)

#undef DEFINE_SYSTEM_VALUE
#undef INTRINSIC
#undef LAST_INTRINSIC