/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

/**
 * This header file defines all the available intrinsics in one place. It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, flags)
 *
 * Which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both ir.h to create the nir_intrinsic enum (with members
 * of the form nir_intrinsic_(name)) and in opcodes.c to create
 * nir_intrinsic_infos, which is a const array of nir_intrinsic_info
 * structures for each intrinsic.
 */

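/*
 * For illustration only, a sketch of how a consumer might include this file
 * to build the enum (the exact definitions live in the including files, not
 * here):
 *
 *    #define INTRINSIC(name, num_srcs, src_components, has_dest, \
 *                      dest_components, num_variables, num_indices, flags) \
 *       nir_intrinsic_##name,
 *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
 *
 *    typedef enum {
 *    #include "nir_intrinsics.h"
 *    } nir_intrinsic_op;
 *
 *    #undef INTRINSIC
 *    #undef LAST_INTRINSIC
 *
 * The info-table consumer would instead define INTRINSIC to emit a
 * nir_intrinsic_info initializer for each entry.
 */
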
#define ARR(...) { __VA_ARGS__ }

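/*
 * Variable load/store/copy. load_var reads a nir_variable into its
 * destination, store_var writes its single source to a nir_variable, and
 * copy_var copies from the second variable to the first. A component count
 * of 0 here means the real count is determined by the variable's type.
 */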
INTRINSIC(load_var, 0, ARR(), true, 0, 1, 0, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 0, 0)
INTRINSIC(copy_var, 0, ARR(), false, 0, 2, 0, 0)

/*
 * Interpolation of input. The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently. The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */

INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

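/*
 * For illustration (a sketch, not part of the original header): the GLSL
 * expression
 *
 *    interpolateAtSample(v_color, 2)
 *
 * would become an interp_var_at_sample intrinsic whose variable is v_color
 * and whose single one-component source is the sample id 2; v_color is a
 * hypothetical input name used only for this example.
 */
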
/*
 * A barrier is an intrinsic with no inputs/outputs but which can't be moved
 * around/optimized in general.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(), false, 0, 0, 0, 0)

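/*
 * For example, BARRIER(discard) below expands to:
 *
 *    INTRINSIC(discard, 0, ARR(), false, 0, 0, 0, 0)
 */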
BARRIER(discard)
/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, 0)

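/*
 * Geometry shader vertex and primitive emission. The single constant index
 * appears to be the vertex stream ID.
 */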
INTRINSIC(emit_vertex, 0, ARR(), false, 0, 0, 1, 0)
INTRINSIC(end_primitive, 0, ARR(), false, 0, 0, 1, 0)

/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */

#define ATOMIC(name, flags) \
   INTRINSIC(atomic_counter_##name##_var, 0, ARR(), true, 1, 1, 0, flags) \
   INTRINSIC(atomic_counter_##name, 1, ARR(1), true, 1, 0, 1, flags)

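/*
 * For example, ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE) below expands to:
 *
 *    INTRINSIC(atomic_counter_read_var, 0, ARR(), true, 1, 1, 0,
 *              NIR_INTRINSIC_CAN_ELIMINATE)
 *    INTRINSIC(atomic_counter_read, 1, ARR(1), true, 1, 0, 1,
 *              NIR_INTRINSIC_CAN_ELIMINATE)
 */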
ATOMIC(inc, 0)
ATOMIC(dec, 0)
ATOMIC(read, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable. Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * first two sources, determining the location within the image that will be
 * accessed by the intrinsic. Components not applicable to the image target
 * in use are undefined. Image store takes an additional four-component
 * argument with the value to be written, and image atomic operations take
 * either one or two additional scalar arguments with the same meaning as in
 * the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, 0)

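/*
 * For illustration (a sketch using hypothetical names): the GLSL call
 *
 *    imageStore(img, ivec2(x, y), data);
 *
 * on a non-multisampled 2D image would become an image_store intrinsic whose
 * variable is img, whose first source is the coordinate vector
 * (x, y, undef, undef), whose second source is an unused sample index, and
 * whose third source is the four-component value data.
 */
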
#define SYSTEM_VALUE(name, components) \
   INTRINSIC(load_##name, 0, ARR(), true, components, 0, 0, \
   NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

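/*
 * For example, SYSTEM_VALUE(sample_pos, 2) below expands to:
 *
 *    INTRINSIC(load_sample_pos, 0, ARR(), true, 2, 0, 0,
 *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 */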
SYSTEM_VALUE(front_face, 1)
SYSTEM_VALUE(vertex_id, 1)
SYSTEM_VALUE(vertex_id_zero_base, 1)
SYSTEM_VALUE(base_vertex, 1)
SYSTEM_VALUE(instance_id, 1)
SYSTEM_VALUE(sample_id, 1)
SYSTEM_VALUE(sample_pos, 2)
SYSTEM_VALUE(sample_mask_in, 1)
SYSTEM_VALUE(invocation_id, 1)

/*
 * The first index is the address to load from, and the second index is the
 * number of array elements to load. Indirect loads have an additional
 * register input, which is added to the constant address to compute the
 * final address to load from. For UBOs (and SSBOs), the first source is
 * the (possibly constant) UBO buffer index and the indirect (if it exists)
 * is the second source.
 *
 * For vector backends, the address is in terms of one vec4, and so each array
 * element is +4 scalar components from the previous array element. For
 * scalar backends, the address is in terms of a single 4-byte float/int and
 * array elements begin immediately after the previous array element.
 */

#define LOAD(name, extra_srcs, flags) \
   INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 2, flags) \
   INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
             true, 0, 0, 2, flags)

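/*
 * For example, LOAD(ubo, 1, ...) below expands to:
 *
 *    INTRINSIC(load_ubo, 1, ARR(1), true, 0, 0, 2,
 *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 *    INTRINSIC(load_ubo_indirect, 2, ARR(1, 1), true, 0, 0, 2,
 *              NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 */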
LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* LOAD(ssbo, 1, 0) */

/*
 * Stores work the same way as loads, except now the first register input is
 * the value or array to store and the optional second input is the indirect
 * offset.
 */

#define STORE(name, num_indices, flags) \
   INTRINSIC(store_##name, 1, ARR(0), false, 0, 0, num_indices, flags) \
   INTRINSIC(store_##name##_indirect, 2, ARR(0, 1), false, 0, 0, \
             num_indices, flags)

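/*
 * For example, STORE(output, 2, 0) below expands to:
 *
 *    INTRINSIC(store_output, 1, ARR(0), false, 0, 0, 2, 0)
 *    INTRINSIC(store_output_indirect, 2, ARR(0, 1), false, 0, 0, 2, 0)
 */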
STORE(output, 2, 0)
/* STORE(ssbo, 3, 0) */

LAST_INTRINSIC(store_output_indirect)