nir/algebraic: mark some optimizations with fsat(NaN) as inexact
[mesa.git] / src / compiler / nir / nir_lower_system_values.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include "nir_builder.h"
30
31 static nir_ssa_def *
32 sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
33 {
34 assert(intrin->dest.is_ssa);
35 const unsigned bit_size = intrin->dest.ssa.bit_size;
36 if (bit_size == 32)
37 return NULL;
38
39 intrin->dest.ssa.bit_size = 32;
40 return nir_u2u(b, &intrin->dest.ssa, bit_size);
41 }
42
43 static nir_ssa_def*
44 build_global_group_size(nir_builder *b, unsigned bit_size)
45 {
46 nir_ssa_def *group_size = nir_load_local_group_size(b);
47 nir_ssa_def *num_work_groups = nir_load_num_work_groups(b, bit_size);
48 return nir_imul(b, nir_u2u(b, group_size, bit_size),
49 num_work_groups);
50 }
51
52 static bool
53 lower_system_value_filter(const nir_instr *instr, const void *_state)
54 {
55 return instr->type == nir_instr_type_intrinsic;
56 }
57
/* Callback for nir_shader_lower_instructions(): returns a replacement SSA
 * value for a system-value intrinsic (or a load_deref of a system-value
 * variable), or NULL to leave the instruction alone.
 */
static nir_ssa_def *
lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   assert(intrin->dest.is_ssa);
   const unsigned bit_size = intrin->dest.ssa.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      /* Optionally rewrite gl_VertexID as zero-based ID plus first vertex. */
      if (b->shader->options->vertex_id_zero_based) {
         return nir_iadd(b, nir_load_vertex_id_zero_base(b),
                         nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_base_vertex:
      /**
       * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
       *
       *   "gl_BaseVertex holds the integer value passed to the baseVertex
       *   parameter to the command that resulted in the current shader
       *   invocation.  In the case where the command has no baseVertex
       *   parameter, the value of gl_BaseVertex is zero."
       */
      if (b->shader->options->lower_base_vertex) {
         /* NOTE(review): the iand only works if load_is_indexed_draw is an
          * all-ones mask for indexed draws and zero otherwise — confirm that
          * contract against the intrinsic's definition.
          */
         return nir_iand(b, nir_load_is_indexed_draw(b),
                         nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_helper_invocation:
      if (b->shader->options->lower_helper_invocation) {
         /* A helper invocation is one whose bit is NOT set in the incoming
          * sample mask: compute (1 << sample_id) & sample_mask and invert.
          */
         nir_ssa_def *tmp;
         tmp = nir_ishl(b, nir_imm_int(b, 1),
                        nir_load_sample_id_no_per_sample(b));
         tmp = nir_iand(b, nir_load_sample_mask_in(b), tmp);
         return nir_inot(b, nir_i2b(b, tmp));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_local_group_size:
      /* Force these intrinsics to 32 bits, converting the result back to
       * whatever bit size the shader requested.
       */
      return sanitize_32bit_sysval(b, intrin);

   case nir_intrinsic_load_deref: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      if (deref->mode != nir_var_system_value)
         return NULL;

      if (deref->deref_type != nir_deref_type_var) {
         /* The only system value that is an array is gl_SampleMask, which
          * is always an array of one element.
          */
         assert(deref->deref_type == nir_deref_type_array);
         deref = nir_deref_instr_parent(deref);
         assert(deref->deref_type == nir_deref_type_var);
         assert(deref->var->data.location == SYSTEM_VALUE_SAMPLE_MASK_IN);
      }
      nir_variable *var = deref->var;

      switch (var->data.location) {
      case SYSTEM_VALUE_INSTANCE_INDEX:
         /* gl_InstanceIndex = gl_InstanceID + baseInstance */
         return nir_iadd(b, nir_load_instance_id(b),
                         nir_load_base_instance(b));

      case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      case SYSTEM_VALUE_SUBGROUP_LT_MASK: {
         /* Build the matching intrinsic by hand so its destination takes
          * the variable's exact type (component count and bit size).
          */
         nir_intrinsic_op op =
            nir_intrinsic_from_system_value(var->data.location);
         nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
         nir_ssa_dest_init_for_type(&load->instr, &load->dest,
                                    var->type, NULL);
         load->num_components = load->dest.ssa.num_components;
         nir_builder_instr_insert(b, &load->instr);
         return &load->dest.ssa;
      }

      case SYSTEM_VALUE_DEVICE_INDEX:
         if (b->shader->options->lower_device_index_to_zero)
            return nir_imm_int(b, 0);
         break;

      case SYSTEM_VALUE_GLOBAL_GROUP_SIZE:
         return build_global_group_size(b, bit_size);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_model,
                                     INTERP_MODE_NONE);

      default:
         break;
      }

      /* Fall back to the direct system-value intrinsic with the same
       * component count and bit size as the original load_deref.
       */
      nir_intrinsic_op sysval_op =
         nir_intrinsic_from_system_value(var->data.location);
      return nir_load_system_value(b, sysval_op, 0,
                                   intrin->dest.ssa.num_components,
                                   intrin->dest.ssa.bit_size);
   }

   default:
      return NULL;
   }
}
198
199 bool
200 nir_lower_system_values(nir_shader *shader)
201 {
202 bool progress = nir_shader_lower_instructions(shader,
203 lower_system_value_filter,
204 lower_system_value_instr,
205 NULL);
206
207 /* We're going to delete the variables so we need to clean up all those
208 * derefs we left lying around.
209 */
210 if (progress)
211 nir_remove_dead_derefs(shader);
212
213 nir_foreach_variable_with_modes_safe(var, shader, nir_var_system_value)
214 exec_node_remove(&var->node);
215
216 return progress;
217 }
218
219 static bool
220 lower_compute_system_value_filter(const nir_instr *instr, const void *_options)
221 {
222 return instr->type == nir_instr_type_intrinsic;
223 }
224
/* Callback for nir_shader_lower_instructions(): lowers compute-stage
 * system-value intrinsics according to the shader compiler options and the
 * per-pass options struct.  Returns a replacement SSA value or NULL.
 */
static nir_ssa_def *
lower_compute_system_value_instr(nir_builder *b,
                                 nir_instr *instr, void *_options)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   const nir_lower_compute_system_values_options *options = _options;

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   assert(intrin->dest.is_ssa);
   const unsigned bit_size = intrin->dest.ssa.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_local_invocation_id:
      /* If lower_cs_local_id_from_index is true, then we derive the local
       * ID from the local index.
       */
      if (b->shader->options->lower_cs_local_id_from_index) {
         /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
          * on this formula:
          *
          *    gl_LocalInvocationID.x =
          *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
          *    gl_LocalInvocationID.y =
          *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
          *       gl_WorkGroupSize.y;
          *    gl_LocalInvocationID.z =
          *       (gl_LocalInvocationIndex /
          *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
          *       gl_WorkGroupSize.z;
          *
          * However, the final % gl_WorkGroupSize.z does nothing unless we
          * accidentally end up with a gl_LocalInvocationIndex that is too
          * large so it can safely be omitted.
          */
         nir_ssa_def *local_index = nir_load_local_invocation_index(b);
         nir_ssa_def *local_size = nir_load_local_group_size(b);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_ssa_def *id_x, *id_y, *id_z;
         id_x = nir_umod(b, local_index,
                            nir_channel(b, local_size, 0));
         id_y = nir_umod(b, nir_udiv(b, local_index,
                                        nir_channel(b, local_size, 0)),
                            nir_channel(b, local_size, 1));
         id_z = nir_udiv(b, local_index,
                            nir_imul(b, nir_channel(b, local_size, 0),
                                        nir_channel(b, local_size, 1)));
         return nir_u2u(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_invocation_index:
      /* If lower_cs_local_index_from_id is true, then we derive the local
       * index from the local id.
       */
      if (b->shader->options->lower_cs_local_index_from_id) {
         /* From the GLSL man page for gl_LocalInvocationIndex:
          *
          *    "The value of gl_LocalInvocationIndex is equal to
          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
          */
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         /* Workgroup size is a compile-time constant here (the variable-size
          * case is only handled for load_local_group_size below).
          */
         nir_ssa_def *size_x =
            nir_imm_int(b, b->shader->info.cs.local_size[0]);
         nir_ssa_def *size_y =
            nir_imm_int(b, b->shader->info.cs.local_size[1]);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_ssa_def *index;
         index = nir_imul(b, nir_channel(b, local_id, 2),
                             nir_imul(b, size_x, size_y));
         index = nir_iadd(b, index,
                             nir_imul(b, nir_channel(b, local_id, 1), size_x));
         index = nir_iadd(b, index, nir_channel(b, local_id, 0));
         return nir_u2u(b, index, bit_size);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_group_size:
      if (b->shader->info.cs.local_size_variable) {
         /* If the local work group size is variable it can't be lowered at
          * this point.  We do, however, have to make sure that the intrinsic
          * is only 32-bit.
          */
         return NULL;
      } else {
         /* using a 32 bit constant is safe here as no device/driver needs more
          * than 32 bits for the local size */
         nir_const_value local_size_const[3];
         memset(local_size_const, 0, sizeof(local_size_const));
         local_size_const[0].u32 = b->shader->info.cs.local_size[0];
         local_size_const[1].u32 = b->shader->info.cs.local_size[1];
         local_size_const[2].u32 = b->shader->info.cs.local_size[2];
         return nir_u2u(b, nir_build_imm(b, 3, 32, local_size_const), bit_size);
      }

   case nir_intrinsic_load_global_invocation_id_zero_base: {
      /* global_id_zero_base = work_group_id * local_group_size + local_id,
       * built here when the driver supplies a base workgroup ID or the
       * hardware has no native global-ID register.
       */
      if ((options && options->has_base_work_group_id) ||
          !b->shader->options->has_cs_global_id) {
         nir_ssa_def *group_size = nir_load_local_group_size(b);
         nir_ssa_def *group_id = nir_load_work_group_id(b, bit_size);
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         return nir_iadd(b, nir_imul(b, group_id,
                                        nir_u2u(b, group_size, bit_size)),
                            nir_u2u(b, local_id, bit_size));
      } else {
         return NULL;
      }
   }

   case nir_intrinsic_load_global_invocation_id: {
      /* Add the base global invocation ID when one exists; otherwise fall
       * back to the zero-based variant when there is no native global ID.
       */
      if (options && options->has_base_global_invocation_id)
         return nir_iadd(b, nir_load_global_invocation_id_zero_base(b, bit_size),
                            nir_load_base_global_invocation_id(b, bit_size));
      else if (!b->shader->options->has_cs_global_id)
         return nir_load_global_invocation_id_zero_base(b, bit_size);
      else
         return NULL;
   }

   case nir_intrinsic_load_global_invocation_index: {
      /* OpenCL's global_linear_id explicitly removes the global offset before computing this */
      assert(b->shader->info.stage == MESA_SHADER_KERNEL);
      nir_ssa_def *global_base_id = nir_load_base_global_invocation_id(b, bit_size);
      nir_ssa_def *global_id = nir_isub(b, nir_load_global_invocation_id(b, bit_size), global_base_id);
      nir_ssa_def *global_size = build_global_group_size(b, bit_size);

      /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
      nir_ssa_def *index;
      index = nir_imul(b, nir_channel(b, global_id, 2),
                          nir_channel(b, global_size, 1));
      index = nir_iadd(b, nir_channel(b, global_id, 1), index);
      index = nir_imul(b, nir_channel(b, global_size, 0), index);
      index = nir_iadd(b, nir_channel(b, global_id, 0), index);
      return index;
   }

   case nir_intrinsic_load_work_group_id: {
      /* Fold the driver-supplied base workgroup ID into the zero-based ID. */
      if (options && options->has_base_work_group_id)
         return nir_iadd(b, nir_u2u(b, nir_load_work_group_id_zero_base(b), bit_size),
                            nir_load_base_work_group_id(b, bit_size));
      else
         return NULL;
   }

   default:
      return NULL;
   }
}
389
390 bool
391 nir_lower_compute_system_values(nir_shader *shader,
392 const nir_lower_compute_system_values_options *options)
393 {
394 if (shader->info.stage != MESA_SHADER_COMPUTE &&
395 shader->info.stage != MESA_SHADER_KERNEL)
396 return false;
397
398 return nir_shader_lower_instructions(shader,
399 lower_compute_system_value_filter,
400 lower_compute_system_value_instr,
401 (void*)options);
402 }