/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

/**
 * This header file defines all the available intrinsics in one place. It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, flags)
 *
 * which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both ir.h to create the nir_intrinsic enum (with members
 * of the form nir_intrinsic_(name)) and in opcodes.c to create
 * nir_intrinsic_infos, which is a const array of nir_intrinsic_info
 * structures for each intrinsic.
 */
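
/*
 * For illustration, a sketch (not the actual mesa code) of how a consumer
 * might define INTRINSIC before including this file in order to build the
 * nir_intrinsic_infos table. The field names here are assumptions inferred
 * from the parameter list documented above:
 *
 *    #define INTRINSIC(name, nsrcs, srccomps, hasdst, dstcomps, nvars, \
 *                      nidxs, fl)                                      \
 *       [nir_intrinsic_##name] = {                                     \
 *          .name = #name,                                              \
 *          .num_srcs = nsrcs,                                          \
 *          .src_components = srccomps,    // ARR(...) expands to {...} \
 *          .has_dest = hasdst,                                         \
 *          .dest_components = dstcomps,                                \
 *          .num_variables = nvars,                                     \
 *          .num_indices = nidxs,                                       \
 *          .flags = fl                                                 \
 *       },
 */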

#define ARR(...) { __VA_ARGS__ }


INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 0, 0)
INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)

/*
 * Interpolation of input. The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently. The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */

INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
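
/*
 * A rough sketch of the GLSL-to-NIR mapping these are meant for (illustrative
 * shorthand only; the IR syntax here is an assumption, not mesa's printing):
 *
 *    interpolateAtCentroid(in_v)    -> interp_var_at_centroid (var=in_v)
 *    interpolateAtSample(in_v, s)   -> interp_var_at_sample (s) (var=in_v)
 *    interpolateAtOffset(in_v, off) -> interp_var_at_offset (off) (var=in_v)
 */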

/*
 * A barrier is an intrinsic with no inputs/outputs but which can't be moved
 * around/optimized in general.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(), false, 0, 0, 0, 0)
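
/*
 * For example, BARRIER(discard) below expands to:
 *
 *    INTRINSIC(discard, 0, ARR(), false, 0, 0, 0, 0)
 *
 * i.e. no sources, no destination, no variables, no indices, and no flags,
 * so the intrinsic can be neither eliminated nor reordered.
 */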

BARRIER(discard)

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, 0)

INTRINSIC(emit_vertex, 0, ARR(), false, 0, 0, 1, 0)
INTRINSIC(end_primitive, 0, ARR(), false, 0, 0, 1, 0)

/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */

#define ATOMIC(name, flags) \
   INTRINSIC(atomic_counter_##name##_var, 0, ARR(), true, 1, 1, 0, flags) \
   INTRINSIC(atomic_counter_##name, 1, ARR(1), true, 1, 0, 1, flags)

ATOMIC(inc, 0)
ATOMIC(dec, 0)
ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)
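
/*
 * A sketch of the lowering implied above (illustrative IR shorthand, not
 * actual mesa printing): a pass that knows the counter's buffer binding and
 * offset replaces the variable form with the indexed form, e.g.
 *
 *    ssa_1 = atomic_counter_inc_var (var=counter)
 * becomes
 *    ssa_1 = atomic_counter_inc (offset) (base=binding)
 *
 * where the single source is the register offset into the buffer and the
 * single index is the constant buffer index.
 */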

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable. Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * the first two sources, determining the location within the image that will
 * be accessed by the intrinsic. Components not applicable to the image
 * target in use are undefined. Image store takes an additional
 * four-component argument with the value to be written, and image atomic
 * operations take either one or two additional scalar arguments with the
 * same meaning as in the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, 0)
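
/*
 * Source layout sketch for the intrinsics above (the src[] naming is just
 * shorthand for this comment):
 *
 *    image_load:             src[0] = coord (vec4), src[1] = sample
 *    image_store:            src[0] = coord, src[1] = sample, src[2] = value
 *    image_atomic_add:       src[0] = coord, src[1] = sample, src[2] = data
 *    image_atomic_comp_swap: src[0] = coord, src[1] = sample,
 *                            src[2] = compare, src[3] = data
 *
 * The operand meanings for the atomics follow the
 * ARB_shader_image_load_store specification.
 */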

#define SYSTEM_VALUE(name, components) \
   INTRINSIC(load_##name, 0, ARR(), true, components, 0, 0, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

SYSTEM_VALUE(front_face, 1)
SYSTEM_VALUE(vertex_id, 1)
SYSTEM_VALUE(vertex_id_zero_base, 1)
SYSTEM_VALUE(base_vertex, 1)
SYSTEM_VALUE(instance_id, 1)
SYSTEM_VALUE(sample_id, 1)
SYSTEM_VALUE(sample_pos, 2)
SYSTEM_VALUE(sample_mask_in, 1)
SYSTEM_VALUE(invocation_id, 1)
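
/*
 * For example, SYSTEM_VALUE(sample_pos, 2) above expands to:
 *
 *    INTRINSIC(load_sample_pos, 0, ARR(), true, 2, 0, 0,
 *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 *
 * i.e. a two-component destination, no sources, and flags that let it be
 * freely eliminated and reordered.
 */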

/*
 * The first and only index is the base address to load from. Indirect
 * loads have an additional register input, which is added to the constant
 * address to compute the final address to load from. For UBOs (and
 * SSBOs), the first source is the (possibly constant) UBO buffer index
 * and the indirect (if it exists) is the second source.
 *
 * For vector backends, the address is in terms of one vec4, and so each array
 * element is +4 scalar components from the previous array element. For
 * scalar backends, the address is in terms of a single 4-byte float/int and
 * array elements begin immediately after the previous array element.
 */

#define LOAD(name, extra_srcs, flags) \
   INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 1, flags) \
   INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
             true, 0, 0, 1, flags)

LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* LOAD(ssbo, 1, 0) */
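
/*
 * Illustrative examples of the addressing rules above (the IR syntax is
 * shorthand for this comment, not actual mesa printing):
 *
 *    ssa_1 = load_uniform () (base=7)             // address 7
 *    ssa_2 = load_uniform_indirect (i) (base=7)   // address 7 + i
 *    ssa_3 = load_ubo (buf) (base=3)              // buffer buf, address 3
 *    ssa_4 = load_ubo_indirect (buf, i) (base=3)  // buffer buf, address 3 + i
 */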

/*
 * Stores work the same way as loads, except now the first register input is
 * the value or array to store and the optional second input is the indirect
 * offset.
 */

#define STORE(name, num_indices, flags) \
   INTRINSIC(store_##name, 1, ARR(0), false, 0, 0, num_indices, flags) \
   INTRINSIC(store_##name##_indirect, 2, ARR(0, 1), false, 0, 0, \
             num_indices, flags)

STORE(output, 1, 0)
/* STORE(ssbo, 2, 0) */
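
/*
 * Illustrative examples, in the same shorthand as the load comment above:
 *
 *    store_output (val) (base=4)              // write val to output 4
 *    store_output_indirect (val, i) (base=4)  // write val to output 4 + i
 */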

LAST_INTRINSIC(store_output_indirect)