[mesa.git] / src / compiler / nir / nir_lower_system_values.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_builder.h"

static nir_ssa_def*
build_local_group_size(nir_builder *b, unsigned bit_size)
{
   nir_ssa_def *local_size;

   /*
    * If the local work group size is variable, it can't be lowered at this
    * point, but its intrinsic can still be used.
    */
   if (b->shader->info.cs.local_size_variable) {
      local_size = nir_load_local_group_size(b);
   } else {
      /* Using a 32-bit constant is safe here, as no device/driver needs more
       * than 32 bits for the local size. */
      nir_const_value local_size_const[3];
      memset(local_size_const, 0, sizeof(local_size_const));
      local_size_const[0].u32 = b->shader->info.cs.local_size[0];
      local_size_const[1].u32 = b->shader->info.cs.local_size[1];
      local_size_const[2].u32 = b->shader->info.cs.local_size[2];
      local_size = nir_build_imm(b, 3, 32, local_size_const);
   }

   return nir_u2u(b, local_size, bit_size);
}

static nir_ssa_def *
build_local_invocation_id(nir_builder *b, unsigned bit_size)
{
   /* If lower_cs_local_id_from_index is true, then we derive the local ID
    * from the local index.
    */
   if (b->shader->options->lower_cs_local_id_from_index) {
      /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
       * on this formula:
       *
       *    gl_LocalInvocationID.x =
       *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
       *    gl_LocalInvocationID.y =
       *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
       *       gl_WorkGroupSize.y;
       *    gl_LocalInvocationID.z =
       *       (gl_LocalInvocationIndex /
       *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
       *       gl_WorkGroupSize.z;
       *
       * However, the final % gl_WorkGroupSize.z does nothing unless we
       * accidentally end up with a gl_LocalInvocationIndex that is too
       * large, so it can safely be omitted.
       */
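      /* For example, with gl_WorkGroupSize = (8, 4, 2) and
       * gl_LocalInvocationIndex = 37, this yields
       *
       *    gl_LocalInvocationID.x = 37 % 8       = 5
       *    gl_LocalInvocationID.y = (37 / 8) % 4 = 0
       *    gl_LocalInvocationID.z = 37 / (8 * 4) = 1
       */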
      nir_ssa_def *local_index = nir_load_local_invocation_index(b);
      nir_ssa_def *local_size = build_local_group_size(b, 32);

      /* Because no hardware supports a local workgroup size greater than
       * about 1K, this calculation can be done in 32-bit and can save some
       * 64-bit arithmetic.
       */
      nir_ssa_def *id_x, *id_y, *id_z;
      id_x = nir_umod(b, local_index,
                         nir_channel(b, local_size, 0));
      id_y = nir_umod(b, nir_udiv(b, local_index,
                                     nir_channel(b, local_size, 0)),
                         nir_channel(b, local_size, 1));
      id_z = nir_udiv(b, local_index,
                         nir_imul(b, nir_channel(b, local_size, 0),
                                     nir_channel(b, local_size, 1)));
      return nir_u2u(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
   } else {
      return nir_u2u(b, nir_load_local_invocation_id(b), bit_size);
   }
}

static nir_ssa_def*
build_global_group_size(nir_builder *b, unsigned bit_size)
{
   nir_ssa_def *group_size = build_local_group_size(b, bit_size);
   nir_ssa_def *num_work_groups = nir_u2u(b, nir_load_num_work_groups(b), bit_size);
   return nir_imul(b, group_size, num_work_groups);
}

static nir_ssa_def*
build_global_invocation_id(nir_builder *b, unsigned bit_size)
{
   /* From the GLSL man page for gl_GlobalInvocationID:
    *
    *    "The value of gl_GlobalInvocationID is equal to
    *    gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID"
    */
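   /* For example, with gl_WorkGroupSize = (8, 4, 2), gl_WorkGroupID = (3, 1, 0)
    * and gl_LocalInvocationID = (5, 0, 1), this gives
    * gl_GlobalInvocationID = (3*8 + 5, 1*4 + 0, 0*2 + 1) = (29, 4, 1).
    */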
   nir_ssa_def *group_size = build_local_group_size(b, bit_size);
   nir_ssa_def *group_id = nir_u2u(b, nir_load_work_group_id(b), bit_size);
   nir_ssa_def *local_id = build_local_invocation_id(b, bit_size);

   return nir_iadd(b, nir_imul(b, group_id, group_size), local_id);
}

static bool
convert_block(nir_block *block, nir_builder *b)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *load_deref = nir_instr_as_intrinsic(instr);
      if (load_deref->intrinsic != nir_intrinsic_load_deref)
         continue;

      nir_deref_instr *deref = nir_src_as_deref(load_deref->src[0]);
      if (deref->mode != nir_var_system_value)
         continue;

      if (deref->deref_type != nir_deref_type_var) {
         /* The only system value that is an array is gl_SampleMaskIn,
          * which is always an array of one element.
          */
         assert(deref->deref_type == nir_deref_type_array);
         deref = nir_deref_instr_parent(deref);
         assert(deref->deref_type == nir_deref_type_var);
         assert(deref->var->data.location == SYSTEM_VALUE_SAMPLE_MASK_IN);
      }
      nir_variable *var = deref->var;

      b->cursor = nir_after_instr(&load_deref->instr);

      unsigned bit_size = nir_dest_bit_size(load_deref->dest);
      nir_ssa_def *sysval = NULL;
      switch (var->data.location) {
      case SYSTEM_VALUE_GLOBAL_INVOCATION_ID: {
         sysval = build_global_invocation_id(b, bit_size);
         break;
      }

      case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX: {
         nir_ssa_def *global_id = build_global_invocation_id(b, bit_size);
         nir_ssa_def *global_size = build_global_group_size(b, bit_size);

         /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
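         /* Here id is gl_GlobalInvocationID and size is the global group
          * size (the local group size times the number of work groups).
          * For example, with a global size of (16, 8, 4) and id = (5, 3, 2),
          * the index is 5 + ((3 + 2*8) * 16) = 309.
          */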
         sysval = nir_imul(b, nir_channel(b, global_id, 2),
                              nir_channel(b, global_size, 1));
         sysval = nir_iadd(b, nir_channel(b, global_id, 1), sysval);
         sysval = nir_imul(b, nir_channel(b, global_size, 0), sysval);
         sysval = nir_iadd(b, nir_channel(b, global_id, 0), sysval);
         break;
      }

      case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX: {
         /* If lower_cs_local_index_from_id is true, then we derive the local
          * index from the local id.
          */
         if (!b->shader->options->lower_cs_local_index_from_id)
            break;

         /* From the GLSL man page for gl_LocalInvocationIndex:
          *
          *    "The value of gl_LocalInvocationIndex is equal to
          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
          */
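         /* For example, with gl_WorkGroupSize = (8, 4, 2) and
          * gl_LocalInvocationID = (5, 0, 1), this gives
          * gl_LocalInvocationIndex = 1*8*4 + 0*8 + 5 = 37, the inverse of
          * the lowering done in build_local_invocation_id().
          */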
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         nir_ssa_def *size_x =
            nir_imm_int(b, b->shader->info.cs.local_size[0]);
         nir_ssa_def *size_y =
            nir_imm_int(b, b->shader->info.cs.local_size[1]);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         sysval = nir_imul(b, nir_channel(b, local_id, 2),
                              nir_imul(b, size_x, size_y));
         sysval = nir_iadd(b, sysval,
                              nir_imul(b, nir_channel(b, local_id, 1), size_x));
         sysval = nir_iadd(b, sysval, nir_channel(b, local_id, 0));
         sysval = nir_u2u(b, sysval, bit_size);
         break;
      }

      case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
         sysval = build_local_invocation_id(b, bit_size);
         break;

      case SYSTEM_VALUE_LOCAL_GROUP_SIZE: {
         sysval = build_local_group_size(b, bit_size);
         break;
      }

      case SYSTEM_VALUE_VERTEX_ID:
         if (b->shader->options->vertex_id_zero_based) {
            sysval = nir_iadd(b,
                              nir_load_vertex_id_zero_base(b),
                              nir_load_first_vertex(b));
         } else {
            sysval = nir_load_vertex_id(b);
         }
         break;

      case SYSTEM_VALUE_BASE_VERTEX:
         /**
          * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
          *
          * "gl_BaseVertex holds the integer value passed to the baseVertex
          * parameter to the command that resulted in the current shader
          * invocation. In the case where the command has no baseVertex
          * parameter, the value of gl_BaseVertex is zero."
          */
         if (b->shader->options->lower_base_vertex)
            sysval = nir_iand(b,
                              nir_load_is_indexed_draw(b),
                              nir_load_first_vertex(b));
         break;

      case SYSTEM_VALUE_HELPER_INVOCATION:
         if (b->shader->options->lower_helper_invocation) {
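            /* A helper invocation's sample is not covered, so its bit is not
             * set in gl_SampleMaskIn.  gl_HelperInvocation can therefore be
             * derived as
             *
             *    !((1 << gl_SampleID) & gl_SampleMaskIn)
             *
             * The no_per_sample variant of the sample-id load is used so
             * that this lowering does not by itself force per-sample
             * shading.
             */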
            nir_ssa_def *tmp;

            tmp = nir_ishl(b,
                           nir_imm_int(b, 1),
                           nir_load_sample_id_no_per_sample(b));

            tmp = nir_iand(b,
                           nir_load_sample_mask_in(b),
                           tmp);

            sysval = nir_inot(b, nir_i2b(b, tmp));
         }

         break;

      case SYSTEM_VALUE_INSTANCE_INDEX:
         sysval = nir_iadd(b,
                           nir_load_instance_id(b),
                           nir_load_base_instance(b));
         break;

      case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      case SYSTEM_VALUE_SUBGROUP_LT_MASK: {
         nir_intrinsic_op op =
            nir_intrinsic_from_system_value(var->data.location);
         nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
         nir_ssa_dest_init_for_type(&load->instr, &load->dest,
                                    var->type, NULL);
         load->num_components = load->dest.ssa.num_components;
         nir_builder_instr_insert(b, &load->instr);
         sysval = &load->dest.ssa;
         break;
      }

      case SYSTEM_VALUE_DEVICE_INDEX:
         if (b->shader->options->lower_device_index_to_zero)
            sysval = nir_imm_int(b, 0);
         break;

      case SYSTEM_VALUE_GLOBAL_GROUP_SIZE: {
         sysval = build_global_group_size(b, bit_size);
         break;
      }

      default:
         break;
      }

      if (sysval == NULL) {
         nir_intrinsic_op sysval_op =
            nir_intrinsic_from_system_value(var->data.location);
         sysval = nir_load_system_value(b, sysval_op, 0,
                                        load_deref->dest.ssa.bit_size);
      }

      nir_ssa_def_rewrite_uses(&load_deref->dest.ssa, nir_src_for_ssa(sysval));
      nir_instr_remove(&load_deref->instr);

      progress = true;
   }

   return progress;
}

static bool
convert_impl(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(block, impl) {
      progress |= convert_block(block, &builder);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
   return progress;
}

bool
nir_lower_system_values(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = convert_impl(function->impl) || progress;
   }

   /* We're going to delete the variables, so we need to clean up all those
    * derefs we left lying around.
    */
   nir_remove_dead_derefs(shader);

   exec_list_make_empty(&shader->system_values);

   return progress;
}