src/compiler/nir/nir_range_analysis.c (mesa.git, commit 87ee919710d545a900c3176ab4432350ad462cb2)
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23 #include <math.h>
24 #include <float.h>
25 #include "nir.h"
26 #include "nir_range_analysis.h"
27 #include "util/hash_table.h"
28
29 /**
30 * Analyzes a sequence of operations to determine some aspects of the range of
31 * the result.
32 */
33
34 static bool
35 is_not_negative(enum ssa_ranges r)
36 {
37 return r == gt_zero || r == ge_zero || r == eq_zero;
38 }
39
40 static void *
41 pack_data(const struct ssa_result_range r)
42 {
43 return (void *)(uintptr_t)(r.range | r.is_integral << 8);
44 }
45
46 static struct ssa_result_range
47 unpack_data(const void *p)
48 {
49 const uintptr_t v = (uintptr_t) p;
50
51 return (struct ssa_result_range){v & 0xff, (v & 0x0ff00) != 0};
52 }
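/* Added illustration (not part of the upstream file): pack_data() keeps the
 * range token in the low byte and the is_integral flag in bit 8, so a value
 * survives a round trip through the hash table unchanged:
 *
 *    const struct ssa_result_range r = { gt_zero, true };
 *    const struct ssa_result_range s = unpack_data(pack_data(r));
 *    assert(s.range == r.range && s.is_integral == r.is_integral);
 */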
53
54 static void *
55 pack_key(const struct nir_alu_instr *instr, nir_alu_type type)
56 {
57 uintptr_t type_encoding;
58 uintptr_t ptr = (uintptr_t) instr;
59
60 /* The low 2 bits have to be zero or this whole scheme falls apart. */
61 assert((ptr & 0x3) == 0);
62
63 /* NIR is typeless in the sense that sequences of bits have whatever
64 * meaning is attached to them by the instruction that consumes them.
65 * However, the number of bits must match between producer and consumer.
66 * As a result, the number of bits does not need to be encoded here.
67 */
68 switch (nir_alu_type_get_base_type(type)) {
69 case nir_type_int: type_encoding = 0; break;
70 case nir_type_uint: type_encoding = 1; break;
71 case nir_type_bool: type_encoding = 2; break;
72 case nir_type_float: type_encoding = 3; break;
73 default: unreachable("Invalid base type.");
74 }
75
76 return (void *)(ptr | type_encoding);
77 }
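/* Added illustration (not part of the upstream file): instructions are at
 * least 4-byte aligned, so the two low pointer bits are free to carry the
 * base type, and the same ALU instruction gets a distinct cache key per type:
 *
 *    void *key_float = pack_key(alu, nir_type_float32);   // ptr | 3
 *    void *key_int   = pack_key(alu, nir_type_int32);     // ptr | 0
 *    assert(key_float != key_int);
 */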
78
79 static nir_alu_type
80 nir_alu_src_type(const nir_alu_instr *instr, unsigned src)
81 {
82 return nir_alu_type_get_base_type(nir_op_infos[instr->op].input_types[src]) |
83 nir_src_bit_size(instr->src[src].src);
84 }
85
86 static struct ssa_result_range
87 analyze_constant(const struct nir_alu_instr *instr, unsigned src,
88 nir_alu_type use_type)
89 {
90 uint8_t swizzle[4] = { 0, 1, 2, 3 };
91
92 /* If the source is an explicitly sized source, then we need to reset
93 * both the number of components and the swizzle.
94 */
95 const unsigned num_components = nir_ssa_alu_instr_src_components(instr, src);
96
97 for (unsigned i = 0; i < num_components; ++i)
98 swizzle[i] = instr->src[src].swizzle[i];
99
100 const nir_load_const_instr *const load =
101 nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);
102
103 struct ssa_result_range r = { unknown, false };
104
105 switch (nir_alu_type_get_base_type(use_type)) {
106 case nir_type_float: {
107 double min_value = DBL_MAX;
108 double max_value = -DBL_MAX;
109 bool any_zero = false;
110 bool all_zero = true;
111
112 r.is_integral = true;
113
114 for (unsigned i = 0; i < num_components; ++i) {
115 const double v = nir_const_value_as_float(load->value[swizzle[i]],
116 load->def.bit_size);
117
118 if (floor(v) != v)
119 r.is_integral = false;
120
121 any_zero = any_zero || (v == 0.0);
122 all_zero = all_zero && (v == 0.0);
123 min_value = MIN2(min_value, v);
124 max_value = MAX2(max_value, v);
125 }
126
127 assert(any_zero >= all_zero);
128 assert(isnan(max_value) || max_value >= min_value);
129
130 if (all_zero)
131 r.range = eq_zero;
132 else if (min_value > 0.0)
133 r.range = gt_zero;
134 else if (min_value == 0.0)
135 r.range = ge_zero;
136 else if (max_value < 0.0)
137 r.range = lt_zero;
138 else if (max_value == 0.0)
139 r.range = le_zero;
140 else if (!any_zero)
141 r.range = ne_zero;
142 else
143 r.range = unknown;
144
145 return r;
146 }
147
148 case nir_type_int:
149 case nir_type_bool: {
150 int64_t min_value = INT_MAX;
151 int64_t max_value = INT_MIN;
152 bool any_zero = false;
153 bool all_zero = true;
154
155 for (unsigned i = 0; i < num_components; ++i) {
156 const int64_t v = nir_const_value_as_int(load->value[swizzle[i]],
157 load->def.bit_size);
158
159 any_zero = any_zero || (v == 0);
160 all_zero = all_zero && (v == 0);
161 min_value = MIN2(min_value, v);
162 max_value = MAX2(max_value, v);
163 }
164
165 assert(any_zero >= all_zero);
166 assert(max_value >= min_value);
167
168 if (all_zero)
169 r.range = eq_zero;
170 else if (min_value > 0)
171 r.range = gt_zero;
172 else if (min_value == 0)
173 r.range = ge_zero;
174 else if (max_value < 0)
175 r.range = lt_zero;
176 else if (max_value == 0)
177 r.range = le_zero;
178 else if (!any_zero)
179 r.range = ne_zero;
180 else
181 r.range = unknown;
182
183 return r;
184 }
185
186 case nir_type_uint: {
187 bool any_zero = false;
188 bool all_zero = true;
189
190 for (unsigned i = 0; i < num_components; ++i) {
191 const uint64_t v = nir_const_value_as_uint(load->value[swizzle[i]],
192 load->def.bit_size);
193
194 any_zero = any_zero || (v == 0);
195 all_zero = all_zero && (v == 0);
196 }
197
198 assert(any_zero >= all_zero);
199
200 if (all_zero)
201 r.range = eq_zero;
202 else if (any_zero)
203 r.range = ge_zero;
204 else
205 r.range = gt_zero;
206
207 return r;
208 }
209
210 default:
211 unreachable("Invalid alu source type");
212 }
213 }
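/* Added worked example: a vec3 float constant (1.0, 2.5, 3.0) analyzed with
 * use_type nir_type_float32 yields { .range = gt_zero, .is_integral = false }:
 * every component is strictly positive, but 2.5 fails the floor(v) == v test.
 */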
214
215 /**
216 * Short-hand name for use in the tables in analyze_expression. If this name
217 * becomes a problem on some compiler, we can change it to _.
218 */
219 #define _______ unknown
220
221
222 #if defined(__clang__)
223 /* clang wants _Pragma("unroll X") */
224 #define pragma_unroll_5 _Pragma("unroll 5")
225 #define pragma_unroll_7 _Pragma("unroll 7")
226 /* gcc wants _Pragma("GCC unroll X") */
227 #elif defined(__GNUC__)
228 #if __GNUC__ >= 8
229 #define pragma_unroll_5 _Pragma("GCC unroll 5")
230 #define pragma_unroll_7 _Pragma("GCC unroll 7")
231 #else
232 #pragma GCC optimize ("unroll-loops")
233 #define pragma_unroll_5
234 #define pragma_unroll_7
235 #endif
236 #else
237 /* MSVC doesn't have C99's _Pragma() */
238 #define pragma_unroll_5
239 #define pragma_unroll_7
240 #endif
241
242
243 #ifndef NDEBUG
244 #define ASSERT_TABLE_IS_COMMUTATIVE(t) \
245 do { \
246 static bool first = true; \
247 if (first) { \
248 first = false; \
249 pragma_unroll_7 \
250 for (unsigned r = 0; r < ARRAY_SIZE(t); r++) { \
251 pragma_unroll_7 \
252 for (unsigned c = 0; c < ARRAY_SIZE(t[0]); c++) \
253 assert(t[r][c] == t[c][r]); \
254 } \
255 } \
256 } while (false)
257
258 #define ASSERT_TABLE_IS_DIAGONAL(t) \
259 do { \
260 static bool first = true; \
261 if (first) { \
262 first = false; \
263 pragma_unroll_7 \
264 for (unsigned r = 0; r < ARRAY_SIZE(t); r++) \
265 assert(t[r][r] == r); \
266 } \
267 } while (false)
268
269 static enum ssa_ranges
270 union_ranges(enum ssa_ranges a, enum ssa_ranges b)
271 {
272 static const enum ssa_ranges union_table[last_range + 1][last_range + 1] = {
273 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
274 /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
275 /* lt_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, le_zero },
276 /* le_zero */ { _______, le_zero, le_zero, _______, _______, _______, le_zero },
277 /* gt_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, ge_zero },
278 /* ge_zero */ { _______, _______, _______, ge_zero, ge_zero, _______, ge_zero },
279 /* ne_zero */ { _______, ne_zero, _______, ne_zero, _______, ne_zero, _______ },
280 /* eq_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
281 };
282
283 ASSERT_TABLE_IS_COMMUTATIVE(union_table);
284 ASSERT_TABLE_IS_DIAGONAL(union_table);
285
286 return union_table[a][b];
287 }
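/* Added example: union_ranges(gt_zero, eq_zero) is ge_zero per the table
 * above; a value known to be either strictly positive or exactly zero is
 * known to be non-negative.
 */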
288
289 /* Verify that the 'unknown' entry in each row (or column) of the table is the
290 * union of all the other values in the row (or column).
291 */
292 #define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t) \
293 do { \
294 static bool first = true; \
295 if (first) { \
296 first = false; \
297 pragma_unroll_7 \
298 for (unsigned i = 0; i < last_range; i++) { \
299 enum ssa_ranges col_range = t[i][unknown + 1]; \
300 enum ssa_ranges row_range = t[unknown + 1][i]; \
301 \
302 pragma_unroll_5 \
303 for (unsigned j = unknown + 2; j < last_range; j++) { \
304 col_range = union_ranges(col_range, t[i][j]); \
305 row_range = union_ranges(row_range, t[j][i]); \
306 } \
307 \
308 assert(col_range == t[i][unknown]); \
309 assert(row_range == t[unknown][i]); \
310 } \
311 } \
312 } while (false)
313
314 /* For most operations, the union of ranges for a strict inequality and
315 * equality should be the range of the non-strict inequality (e.g.,
316 * union_ranges(range(op(lt_zero)), range(op(eq_zero))) == range(op(le_zero)).
317 *
318 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
319 */
320 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t) \
321 do { \
322 assert(union_ranges(t[lt_zero], t[eq_zero]) == t[le_zero]); \
323 assert(union_ranges(t[gt_zero], t[eq_zero]) == t[ge_zero]); \
324 } while (false)
325
326 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t) \
327 do { \
328 static bool first = true; \
329 if (first) { \
330 first = false; \
331 pragma_unroll_7 \
332 for (unsigned i = 0; i < last_range; i++) { \
333 assert(union_ranges(t[i][lt_zero], t[i][eq_zero]) == t[i][le_zero]); \
334 assert(union_ranges(t[i][gt_zero], t[i][eq_zero]) == t[i][ge_zero]); \
335 assert(union_ranges(t[lt_zero][i], t[eq_zero][i]) == t[le_zero][i]); \
336 assert(union_ranges(t[gt_zero][i], t[eq_zero][i]) == t[ge_zero][i]); \
337 } \
338 } \
339 } while (false)
340
341 /* Several other unordered tuples span the range of "everything." Each should
342 * have the same value as unknown: (lt_zero, ge_zero), (le_zero, gt_zero), and
343 * (eq_zero, ne_zero). union_ranges is already commutative, so only one
344 * ordering needs to be checked.
345 *
346 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
347 *
348 * In cases where this can be used, it is unnecessary to also use
349 * ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_*_SOURCE. For any range X,
350 * union_ranges(X, X) == X. The disjoint ranges cover all of the non-unknown
351 * possibilities, so the union of all the unions of disjoint ranges is
352 * equivalent to the union of "others."
353 */
354 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t) \
355 do { \
356 assert(union_ranges(t[lt_zero], t[ge_zero]) == t[unknown]); \
357 assert(union_ranges(t[le_zero], t[gt_zero]) == t[unknown]); \
358 assert(union_ranges(t[eq_zero], t[ne_zero]) == t[unknown]); \
359 } while (false)
360
361 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t) \
362 do { \
363 static bool first = true; \
364 if (first) { \
365 first = false; \
366 pragma_unroll_7 \
367 for (unsigned i = 0; i < last_range; i++) { \
368 assert(union_ranges(t[i][lt_zero], t[i][ge_zero]) == \
369 t[i][unknown]); \
370 assert(union_ranges(t[i][le_zero], t[i][gt_zero]) == \
371 t[i][unknown]); \
372 assert(union_ranges(t[i][eq_zero], t[i][ne_zero]) == \
373 t[i][unknown]); \
374 \
375 assert(union_ranges(t[lt_zero][i], t[ge_zero][i]) == \
376 t[unknown][i]); \
377 assert(union_ranges(t[le_zero][i], t[gt_zero][i]) == \
378 t[unknown][i]); \
379 assert(union_ranges(t[eq_zero][i], t[ne_zero][i]) == \
380 t[unknown][i]); \
381 } \
382 } \
383 } while (false)
384
385 #else
386 #define ASSERT_TABLE_IS_COMMUTATIVE(t)
387 #define ASSERT_TABLE_IS_DIAGONAL(t)
388 #define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t)
389 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t)
390 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t)
391 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t)
392 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t)
393 #endif
394
395 /**
396 * Analyze an expression to determine the range of its result
397 *
398 * The end result of this analysis is a token that communicates something
399 * about the range of values. There's an implicit grammar that produces
400 * tokens from sequences of literal values, other tokens, and operations.
401 * This function implements this grammar as a recursive-descent parser. Some
402 * (but not all) of the grammar is listed in-line in the function.
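 *
 * As an added illustration (not an exhaustive trace): for fsat(fmul(a, a))
 * with 'a' of unknown range, fmul of two identical sources yields ge_zero
 * (x*x cannot be negative, though flush-to-zero may make it zero), and fsat
 * of a ge_zero value remains ge_zero.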
403 */
404 static struct ssa_result_range
405 analyze_expression(const nir_alu_instr *instr, unsigned src,
406 struct hash_table *ht, nir_alu_type use_type)
407 {
408 /* Ensure that the _Pragma("GCC unroll 7") above is correct. */
409 STATIC_ASSERT(last_range + 1 == 7);
410
411 if (!instr->src[src].src.is_ssa)
412 return (struct ssa_result_range){unknown, false};
413
414 if (nir_src_is_const(instr->src[src].src))
415 return analyze_constant(instr, src, use_type);
416
417 if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
418 return (struct ssa_result_range){unknown, false};
419
420 const struct nir_alu_instr *const alu =
421 nir_instr_as_alu(instr->src[src].src.ssa->parent_instr);
422
423 /* Bail if the type of the instruction generating the value does not match
424 * the type the value will be interpreted as. int/uint/bool can be
425 * reinterpreted trivially. The most important cases are between float and
426 * non-float.
427 */
428 if (alu->op != nir_op_mov && alu->op != nir_op_bcsel) {
429 const nir_alu_type use_base_type =
430 nir_alu_type_get_base_type(use_type);
431 const nir_alu_type src_base_type =
432 nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
433
434 if (use_base_type != src_base_type &&
435 (use_base_type == nir_type_float ||
436 src_base_type == nir_type_float)) {
437 return (struct ssa_result_range){unknown, false};
438 }
439 }
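/* Added illustrative note: a value produced by nir_op_iadd but consumed as
 * nir_type_float carries no usable sign information here, so the check above
 * returns an unknown range for it.
 */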
440
441 struct hash_entry *he = _mesa_hash_table_search(ht, pack_key(alu, use_type));
442 if (he != NULL)
443 return unpack_data(he->data);
444
445 struct ssa_result_range r = {unknown, false};
446
447 /* ge_zero: ge_zero + ge_zero
448 *
449 * gt_zero: gt_zero + eq_zero
450 * | gt_zero + ge_zero
451 * | eq_zero + gt_zero # Addition is commutative
452 * | ge_zero + gt_zero # Addition is commutative
453 * | gt_zero + gt_zero
454 * ;
455 *
456 * le_zero: le_zero + le_zero
457 *
458 * lt_zero: lt_zero + eq_zero
459 * | lt_zero + le_zero
460 * | eq_zero + lt_zero # Addition is commutative
461 * | le_zero + lt_zero # Addition is commutative
462 * | lt_zero + lt_zero
463 * ;
464 *
465 * ne_zero: eq_zero + ne_zero
466 * | ne_zero + eq_zero # Addition is commutative
467 * ;
468 *
469 * eq_zero: eq_zero + eq_zero
470 * ;
471 *
472 * All other cases are 'unknown'. The seemingly odd entry is (ne_zero,
473 * ne_zero), but the operands could be (-5, +5), whose sum is not ne_zero.
474 */
475 static const enum ssa_ranges fadd_table[last_range + 1][last_range + 1] = {
476 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
477 /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
478 /* lt_zero */ { _______, lt_zero, lt_zero, _______, _______, _______, lt_zero },
479 /* le_zero */ { _______, lt_zero, le_zero, _______, _______, _______, le_zero },
480 /* gt_zero */ { _______, _______, _______, gt_zero, gt_zero, _______, gt_zero },
481 /* ge_zero */ { _______, _______, _______, gt_zero, ge_zero, _______, ge_zero },
482 /* ne_zero */ { _______, _______, _______, _______, _______, _______, ne_zero },
483 /* eq_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
484 };
485
486 ASSERT_TABLE_IS_COMMUTATIVE(fadd_table);
487 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fadd_table);
488 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fadd_table);
489
490 /* Due to flush-to-zero semantics of floating-point numbers with very
491 * small magnitudes, we can never really be sure a result will be
492 * non-zero.
493 *
494 * ge_zero: ge_zero * ge_zero
495 * | ge_zero * gt_zero
496 * | ge_zero * eq_zero
497 * | le_zero * lt_zero
498 * | lt_zero * le_zero # Multiplication is commutative
499 * | le_zero * le_zero
500 * | gt_zero * ge_zero # Multiplication is commutative
501 * | eq_zero * ge_zero # Multiplication is commutative
502 * | a * a # Left source == right source
503 * | gt_zero * gt_zero
504 * | lt_zero * lt_zero
505 * ;
506 *
507 * le_zero: ge_zero * le_zero
508 * | ge_zero * lt_zero
509 * | lt_zero * ge_zero # Multiplication is commutative
510 * | le_zero * ge_zero # Multiplication is commutative
511 * | le_zero * gt_zero
512 * | lt_zero * gt_zero
513 * | gt_zero * lt_zero # Multiplication is commutative
514 * ;
515 *
516 * eq_zero: eq_zero * <any>
517 * | <any> * eq_zero # Multiplication is commutative
518 *
519 * All other cases are 'unknown'.
520 */
521 static const enum ssa_ranges fmul_table[last_range + 1][last_range + 1] = {
522 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
523 /* unknown */ { _______, _______, _______, _______, _______, _______, eq_zero },
524 /* lt_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
525 /* le_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
526 /* gt_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
527 /* ge_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
528 /* ne_zero */ { _______, _______, _______, _______, _______, _______, eq_zero },
529 /* eq_zero */ { eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero }
530 };
531
532 ASSERT_TABLE_IS_COMMUTATIVE(fmul_table);
533 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fmul_table);
534 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fmul_table);
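/* Added example: fmul_table[lt_zero][gt_zero] is le_zero rather than
 * lt_zero; a negative value times a positive value is never positive, but a
 * tiny product may flush to zero.
 */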
535
536 static const enum ssa_ranges fneg_table[last_range + 1] = {
537 /* unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
538 _______, gt_zero, ge_zero, lt_zero, le_zero, ne_zero, eq_zero
539 };
540
541 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(fneg_table);
542 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(fneg_table);
543
544
545 switch (alu->op) {
546 case nir_op_b2f32:
547 case nir_op_b2i32:
548 r = (struct ssa_result_range){ge_zero, alu->op == nir_op_b2f32};
549 break;
550
551 case nir_op_bcsel: {
552 const struct ssa_result_range left =
553 analyze_expression(alu, 1, ht, use_type);
554 const struct ssa_result_range right =
555 analyze_expression(alu, 2, ht, use_type);
556
557 r.is_integral = left.is_integral && right.is_integral;
558
559 /* le_zero: bcsel(<any>, le_zero, lt_zero)
560 * | bcsel(<any>, eq_zero, lt_zero)
561 * | bcsel(<any>, le_zero, eq_zero)
562 * | bcsel(<any>, lt_zero, le_zero)
563 * | bcsel(<any>, lt_zero, eq_zero)
564 * | bcsel(<any>, eq_zero, le_zero)
565 * | bcsel(<any>, le_zero, le_zero)
566 * ;
567 *
568 * lt_zero: bcsel(<any>, lt_zero, lt_zero)
569 * ;
570 *
571 * ge_zero: bcsel(<any>, ge_zero, ge_zero)
572 * | bcsel(<any>, ge_zero, gt_zero)
573 * | bcsel(<any>, ge_zero, eq_zero)
574 * | bcsel(<any>, gt_zero, ge_zero)
575 * | bcsel(<any>, eq_zero, ge_zero)
576 * ;
577 *
578 * gt_zero: bcsel(<any>, gt_zero, gt_zero)
579 * ;
580 *
581 * ne_zero: bcsel(<any>, ne_zero, gt_zero)
582 * | bcsel(<any>, ne_zero, lt_zero)
583 * | bcsel(<any>, gt_zero, lt_zero)
584 * | bcsel(<any>, gt_zero, ne_zero)
585 * | bcsel(<any>, lt_zero, ne_zero)
586 * | bcsel(<any>, lt_zero, gt_zero)
587 * | bcsel(<any>, ne_zero, ne_zero)
588 * ;
589 *
590 * eq_zero: bcsel(<any>, eq_zero, eq_zero)
591 * ;
592 *
593 * All other cases are 'unknown'.
594 *
595 * The ranges could be tightened if the range of the first source is
596 * known. However, opt_algebraic will (eventually) eliminate the bcsel
597 * if the condition is known.
598 */
599 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
600 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
601 /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
602 /* lt_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, le_zero },
603 /* le_zero */ { _______, le_zero, le_zero, _______, _______, _______, le_zero },
604 /* gt_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, ge_zero },
605 /* ge_zero */ { _______, _______, _______, ge_zero, ge_zero, _______, ge_zero },
606 /* ne_zero */ { _______, ne_zero, _______, ne_zero, _______, ne_zero, _______ },
607 /* eq_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
608 };
609
610 ASSERT_TABLE_IS_COMMUTATIVE(table);
611 ASSERT_TABLE_IS_DIAGONAL(table);
612 ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);
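/* Added example: table[gt_zero][eq_zero] is ge_zero; selecting between a
 * strictly positive value and zero yields a non-negative value.
 */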
613
614 r.range = table[left.range][right.range];
615 break;
616 }
617
618 case nir_op_i2f32:
619 case nir_op_u2f32:
620 r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
621
622 r.is_integral = true;
623
624 if (r.range == unknown && alu->op == nir_op_u2f32)
625 r.range = ge_zero;
626
627 break;
628
629 case nir_op_fabs:
630 r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
631
632 switch (r.range) {
633 case unknown:
634 case le_zero:
635 case ge_zero:
636 r.range = ge_zero;
637 break;
638
639 case lt_zero:
640 case gt_zero:
641 case ne_zero:
642 r.range = gt_zero;
643 break;
644
645 case eq_zero:
646 break;
647 }
648
649 break;
650
651 case nir_op_fadd: {
652 const struct ssa_result_range left =
653 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
654 const struct ssa_result_range right =
655 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
656
657 r.is_integral = left.is_integral && right.is_integral;
658 r.range = fadd_table[left.range][right.range];
659 break;
660 }
661
662 case nir_op_fexp2: {
663 /* If the parameter might be less than zero, the mathematical result
664 * will be in (0, 1). For sufficiently large magnitude negative
665 * parameters, the result will flush to zero.
666 */
667 static const enum ssa_ranges table[last_range + 1] = {
668 /* unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
669 ge_zero, ge_zero, ge_zero, gt_zero, gt_zero, ge_zero, gt_zero
670 };
671
672 r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
673
674 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(table);
675 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(table);
676
677 r.is_integral = r.is_integral && is_not_negative(r.range);
678 r.range = table[r.range];
679 break;
680 }
681
682 case nir_op_fmax: {
683 const struct ssa_result_range left =
684 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
685 const struct ssa_result_range right =
686 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
687
688 r.is_integral = left.is_integral && right.is_integral;
689
690 /* gt_zero: fmax(gt_zero, *)
691 * | fmax(*, gt_zero) # Treat fmax as commutative
692 * ;
693 *
694 * ge_zero: fmax(ge_zero, ne_zero)
695 * | fmax(ge_zero, lt_zero)
696 * | fmax(ge_zero, le_zero)
697 * | fmax(ge_zero, eq_zero)
698 * | fmax(ne_zero, ge_zero) # Treat fmax as commutative
699 * | fmax(lt_zero, ge_zero) # Treat fmax as commutative
700 * | fmax(le_zero, ge_zero) # Treat fmax as commutative
701 * | fmax(eq_zero, ge_zero) # Treat fmax as commutative
702 * | fmax(ge_zero, ge_zero)
703 * ;
704 *
705 * le_zero: fmax(le_zero, lt_zero)
706 * | fmax(lt_zero, le_zero) # Treat fmax as commutative
707 * | fmax(le_zero, le_zero)
708 * ;
709 *
710 * lt_zero: fmax(lt_zero, lt_zero)
711 * ;
712 *
713 * ne_zero: fmax(ne_zero, lt_zero)
714 * | fmax(lt_zero, ne_zero) # Treat fmax as commutative
715 * | fmax(ne_zero, ne_zero)
716 * ;
717 *
718 * eq_zero: fmax(eq_zero, le_zero)
719 * | fmax(eq_zero, lt_zero)
720 * | fmax(le_zero, eq_zero) # Treat fmax as commutative
721 * | fmax(lt_zero, eq_zero) # Treat fmax as commutative
722 * | fmax(eq_zero, eq_zero)
723 * ;
724 *
725 * All other cases are 'unknown'.
726 */
727 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
728 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
729 /* unknown */ { _______, _______, _______, gt_zero, ge_zero, _______, _______ },
730 /* lt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
731 /* le_zero */ { _______, le_zero, le_zero, gt_zero, ge_zero, _______, eq_zero },
732 /* gt_zero */ { gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero },
733 /* ge_zero */ { ge_zero, ge_zero, ge_zero, gt_zero, ge_zero, ge_zero, ge_zero },
734 /* ne_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, _______ },
735 /* eq_zero */ { _______, eq_zero, eq_zero, gt_zero, ge_zero, _______, eq_zero }
736 };
737
738 /* Treat fmax as commutative. */
739 ASSERT_TABLE_IS_COMMUTATIVE(table);
740 ASSERT_TABLE_IS_DIAGONAL(table);
741 ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);
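/* Added example: table[unknown][gt_zero] is gt_zero; if either operand of
 * fmax is known to be strictly positive, the maximum is as well.
 */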
742
743 r.range = table[left.range][right.range];
744 break;
745 }
746
747 case nir_op_fmin: {
748 const struct ssa_result_range left =
749 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
750 const struct ssa_result_range right =
751 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
752
753 r.is_integral = left.is_integral && right.is_integral;
754
755 /* lt_zero: fmin(lt_zero, *)
756 * | fmin(*, lt_zero) # Treat fmin as commutative
757 * ;
758 *
759 * le_zero: fmin(le_zero, ne_zero)
760 * | fmin(le_zero, gt_zero)
761 * | fmin(le_zero, ge_zero)
762 * | fmin(le_zero, eq_zero)
763 * | fmin(ne_zero, le_zero) # Treat fmin as commutative
764 * | fmin(gt_zero, le_zero) # Treat fmin as commutative
765 * | fmin(ge_zero, le_zero) # Treat fmin as commutative
766 * | fmin(eq_zero, le_zero) # Treat fmin as commutative
767 * | fmin(le_zero, le_zero)
768 * ;
769 *
770 * ge_zero: fmin(ge_zero, gt_zero)
771 * | fmin(gt_zero, ge_zero) # Treat fmin as commutative
772 * | fmin(ge_zero, ge_zero)
773 * ;
774 *
775 * gt_zero: fmin(gt_zero, gt_zero)
776 * ;
777 *
778 * ne_zero: fmin(ne_zero, gt_zero)
779 * | fmin(gt_zero, ne_zero) # Treat fmin as commutative
780 * | fmin(ne_zero, ne_zero)
781 * ;
782 *
783 * eq_zero: fmin(eq_zero, ge_zero)
784 * | fmin(eq_zero, gt_zero)
785 * | fmin(ge_zero, eq_zero) # Treat fmin as commutative
786 * | fmin(gt_zero, eq_zero) # Treat fmin as commutative
787 * | fmin(eq_zero, eq_zero)
788 * ;
789 *
790 * All other cases are 'unknown'.
791 */
792 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
793 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
794 /* unknown */ { _______, lt_zero, le_zero, _______, _______, _______, _______ },
795 /* lt_zero */ { lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero },
796 /* le_zero */ { le_zero, lt_zero, le_zero, le_zero, le_zero, le_zero, le_zero },
797 /* gt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
798 /* ge_zero */ { _______, lt_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
799 /* ne_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, _______ },
800 /* eq_zero */ { _______, lt_zero, le_zero, eq_zero, eq_zero, _______, eq_zero }
801 };
802
803 /* Treat fmin as commutative. */
804 ASSERT_TABLE_IS_COMMUTATIVE(table);
805 ASSERT_TABLE_IS_DIAGONAL(table);
806 ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);
807
808 r.range = table[left.range][right.range];
809 break;
810 }
811
812 case nir_op_fmul: {
813 const struct ssa_result_range left =
814 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
815 const struct ssa_result_range right =
816 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
817
818 r.is_integral = left.is_integral && right.is_integral;
819
820 /* x * x => ge_zero */
821 if (left.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) {
822 /* Even if x > 0, the result of x*x can be zero when x is, for
823 * example, a subnormal number.
824 */
825 r.range = ge_zero;
826 } else if (left.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) {
827 /* -x * x => le_zero. */
828 r.range = le_zero;
829 } else
830 r.range = fmul_table[left.range][right.range];
831
832 break;
833 }
834
835 case nir_op_frcp:
836 r = (struct ssa_result_range){
837 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)).range,
838 false
839 };
840 break;
841
842 case nir_op_mov:
843 r = analyze_expression(alu, 0, ht, use_type);
844 break;
845
846 case nir_op_fneg:
847 r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
848
849 r.range = fneg_table[r.range];
850 break;
851
852 case nir_op_fsat:
853 r = analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
854
855 switch (r.range) {
856 case le_zero:
857 case lt_zero:
858 r.range = eq_zero;
859 r.is_integral = true;
860 break;
861
862 case eq_zero:
863 assert(r.is_integral);
864 case gt_zero:
865 case ge_zero:
866 /* The fsat doesn't add any information in these cases. */
867 break;
868
869 case ne_zero:
870 case unknown:
871 /* Since the result must be in [0, 1], the value must be >= 0. */
872 r.range = ge_zero;
873 break;
874 }
875 break;
876
877 case nir_op_fsign:
878 r = (struct ssa_result_range){
879 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0)).range,
880 true
881 };
882 break;
883
884 case nir_op_fsqrt:
885 case nir_op_frsq:
886 r = (struct ssa_result_range){ge_zero, false};
887 break;
888
889 case nir_op_ffloor: {
890 const struct ssa_result_range left =
891 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
892
893 r.is_integral = true;
894
895 if (left.is_integral || left.range == le_zero || left.range == lt_zero)
896 r.range = left.range;
897 else if (left.range == ge_zero || left.range == gt_zero)
898 r.range = ge_zero;
899 else if (left.range == ne_zero)
900 r.range = unknown;
901
902 break;
903 }
904
905 case nir_op_fceil: {
906 const struct ssa_result_range left =
907 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
908
909 r.is_integral = true;
910
911 if (left.is_integral || left.range == ge_zero || left.range == gt_zero)
912 r.range = left.range;
913 else if (left.range == le_zero || left.range == lt_zero)
914 r.range = le_zero;
915 else if (left.range == ne_zero)
916 r.range = unknown;
917
918 break;
919 }
920
921 case nir_op_ftrunc: {
922 const struct ssa_result_range left =
923 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
924
925 r.is_integral = true;
926
927 if (left.is_integral)
928 r.range = left.range;
929 else if (left.range == ge_zero || left.range == gt_zero)
930 r.range = ge_zero;
931 else if (left.range == le_zero || left.range == lt_zero)
932 r.range = le_zero;
933 else if (left.range == ne_zero)
934 r.range = unknown;
935
936 break;
937 }
938
939 case nir_op_flt:
940 case nir_op_fge:
941 case nir_op_feq:
942 case nir_op_fne:
943 case nir_op_ilt:
944 case nir_op_ige:
945 case nir_op_ieq:
946 case nir_op_ine:
947 case nir_op_ult:
948 case nir_op_uge:
949 /* Boolean results are 0 or -1. */
950 r = (struct ssa_result_range){le_zero, false};
951 break;
952
953 case nir_op_fpow: {
954 /* Due to flush-to-zero semantics of floating-point numbers with very
955 * small magnitudes, we can never really be sure a result will be
956 * non-zero.
957 *
958 * NIR uses pow() and powf() to constant evaluate nir_op_fpow. The man
959 * page for that function says:
960 *
961 * If y is 0, the result is 1.0 (even if x is a NaN).
962 *
963 * gt_zero: pow(*, eq_zero)
964 * | pow(eq_zero, lt_zero) # 0^-y = +inf
965 * | pow(eq_zero, le_zero) # 0^-y = +inf or 0^0 = 1.0
966 * ;
967 *
968 * eq_zero: pow(eq_zero, gt_zero)
969 * ;
970 *
971 * ge_zero: pow(gt_zero, gt_zero)
972 * | pow(gt_zero, ge_zero)
973 * | pow(gt_zero, lt_zero)
974 * | pow(gt_zero, le_zero)
975 * | pow(gt_zero, ne_zero)
976 * | pow(gt_zero, unknown)
977 * | pow(ge_zero, gt_zero)
978 * | pow(ge_zero, ge_zero)
979 * | pow(ge_zero, lt_zero)
980 * | pow(ge_zero, le_zero)
981 * | pow(ge_zero, ne_zero)
982 * | pow(ge_zero, unknown)
983 * | pow(eq_zero, ge_zero) # 0^0 = 1.0 or 0^+y = 0.0
984 * | pow(eq_zero, ne_zero) # 0^-y = +inf or 0^+y = 0.0
985 * | pow(eq_zero, unknown) # union of all other y cases
986 * ;
987 *
988 * All other cases are unknown.
989 *
990 * We could do better if the right operand is a constant, integral
991 * value.
992 */
993 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
994 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
995 /* unknown */ { _______, _______, _______, _______, _______, _______, gt_zero },
996 /* lt_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
997 /* le_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
998 /* gt_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero },
999 /* ge_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero },
1000 /* ne_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
1001 /* eq_zero */ { ge_zero, gt_zero, gt_zero, eq_zero, ge_zero, ge_zero, gt_zero },
1002 };
1003
1004 const struct ssa_result_range left =
1005 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
1006 const struct ssa_result_range right =
1007 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
1008
1009 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(table);
1010 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(table);
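/* Added examples: table[gt_zero][eq_zero] is gt_zero because pow(x, 0) is
 * 1.0, and table[eq_zero][gt_zero] is eq_zero because 0 raised to a positive
 * power is 0.
 */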
1011
1012 r.is_integral = left.is_integral && right.is_integral &&
1013 is_not_negative(right.range);
1014 r.range = table[left.range][right.range];
1015 break;
1016 }
1017
1018 case nir_op_ffma: {
1019 const struct ssa_result_range first =
1020 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
1021 const struct ssa_result_range second =
1022 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
1023 const struct ssa_result_range third =
1024 analyze_expression(alu, 2, ht, nir_alu_src_type(alu, 2));
1025
1026 r.is_integral = first.is_integral && second.is_integral &&
1027 third.is_integral;
1028
1029 enum ssa_ranges fmul_range;
1030
1031 if (first.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) {
1032 /* See handling of nir_op_fmul for explanation of why ge_zero is the
1033 * range.
1034 */
1035 fmul_range = ge_zero;
1036 } else if (first.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) {
1037 /* -x * x => le_zero */
1038 fmul_range = le_zero;
1039 } else
1040 fmul_range = fmul_table[first.range][second.range];
1041
1042 r.range = fadd_table[fmul_range][third.range];
1043 break;
1044 }
1045
1046 case nir_op_flrp: {
1047 const struct ssa_result_range first =
1048 analyze_expression(alu, 0, ht, nir_alu_src_type(alu, 0));
1049 const struct ssa_result_range second =
1050 analyze_expression(alu, 1, ht, nir_alu_src_type(alu, 1));
1051 const struct ssa_result_range third =
1052 analyze_expression(alu, 2, ht, nir_alu_src_type(alu, 2));
1053
1054 r.is_integral = first.is_integral && second.is_integral &&
1055 third.is_integral;
1056
1057 /* Decompose the flrp to first + third * (second + -first) */
1058 const enum ssa_ranges inner_fadd_range =
1059 fadd_table[second.range][fneg_table[first.range]];
1060
1061 const enum ssa_ranges fmul_range =
1062 fmul_table[third.range][inner_fadd_range];
1063
1064 r.range = fadd_table[first.range][fmul_range];
1065 break;
1066 }
1067
1068 default:
1069 r = (struct ssa_result_range){unknown, false};
1070 break;
1071 }
1072
1073 if (r.range == eq_zero)
1074 r.is_integral = true;
1075
1076 _mesa_hash_table_insert(ht, pack_key(alu, use_type), pack_data(r));
1077 return r;
1078 }
1079
1080 #undef _______
1081
1082 struct ssa_result_range
1083 nir_analyze_range(struct hash_table *range_ht,
1084 const nir_alu_instr *instr, unsigned src)
1085 {
1086 return analyze_expression(instr, src, range_ht,
1087 nir_alu_src_type(instr, src));
1088 }
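/* Hypothetical caller sketch (added example; 'alu' and the hash table are
 * assumptions, not code from this file):
 *
 *    struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
 *    const struct ssa_result_range r = nir_analyze_range(ht, alu, 0);
 *    if (r.range == gt_zero || r.range == ge_zero || r.range == eq_zero)
 *       ... source 0 of 'alu' is known to be non-negative ...
 *    _mesa_hash_table_destroy(ht, NULL);
 */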
1089
1090 static uint32_t bitmask(uint32_t size) {
1091 return size >= 32 ? 0xffffffffu : ((uint32_t)1 << size) - 1u;
1092 }
1093
1094 static uint64_t mul_clamp(uint32_t a, uint32_t b)
1095 {
1096 if (a != 0 && (a * b) / a != b)
1097 return (uint64_t)UINT32_MAX + 1;
1098 else
1099 return a * b;
1100 }
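/* Added examples: bitmask(4) == 0xf and bitmask(32) == 0xffffffff (the
 * size >= 32 case avoids an undefined 32-bit shift), while
 * mul_clamp(0x10000, 0x10000) overflows uint32_t and therefore returns the
 * 64-bit sentinel UINT32_MAX + 1.
 */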
1101
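/* Added explanatory note: search_phi_bcsel flattens a tree of phis and
 * bcsels into the set of scalars that can actually reach 'scalar'.  For
 * example, phi(bcsel(c, a, b), d) fills 'buf' with { a, b, d }, and the
 * cyclic-phi handling below then bounds the phi by the maximum bound of
 * those leaves.
 */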
1102 static unsigned
1103 search_phi_bcsel(nir_ssa_scalar scalar, nir_ssa_scalar *buf, unsigned buf_size, struct set *visited)
1104 {
1105 if (_mesa_set_search(visited, scalar.def))
1106 return 0;
1107 _mesa_set_add(visited, scalar.def);
1108
1109 if (scalar.def->parent_instr->type == nir_instr_type_phi) {
1110 nir_phi_instr *phi = nir_instr_as_phi(scalar.def->parent_instr);
1111 unsigned num_sources_left = exec_list_length(&phi->srcs);
1112 unsigned total_added = 0;
1113 nir_foreach_phi_src(src, phi) {
1114 unsigned added = search_phi_bcsel(
1115 (nir_ssa_scalar){src->src.ssa, 0}, buf + total_added, buf_size - num_sources_left, visited);
1116 buf_size -= added;
1117 total_added += added;
1118 num_sources_left--;
1119 }
1120 return total_added;
1121 }
1122
1123 if (nir_ssa_scalar_is_alu(scalar)) {
1124 nir_op op = nir_ssa_scalar_alu_op(scalar);
1125
1126 if ((op == nir_op_bcsel || op == nir_op_b32csel) && buf_size >= 2) {
/* Chase the two value sources of the bcsel; alu src 0 is the condition. */
1127 nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
1128 nir_ssa_scalar src2 = nir_ssa_scalar_chase_alu_src(scalar, 2);
1129
1130 unsigned added = search_phi_bcsel(src1, buf, buf_size - 1, visited);
1131 buf_size -= added;
1132 added += search_phi_bcsel(src2, buf + added, buf_size, visited);
1133 return added;
1134 }
1135 }
1136
1137 buf[0] = scalar;
1138 return 1;
1139 }
1140
1141 static nir_variable *
1142 lookup_input(nir_shader *shader, unsigned driver_location)
1143 {
1144 nir_foreach_variable(var, &shader->inputs) {
1145 if (driver_location == var->data.driver_location)
1146 return var;
1147 }
1148 return NULL;
1149 }
1150
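/* Hypothetical caller sketch (added example; 'index_def' and the limits are
 * assumptions, not values from this file):
 *
 *    const nir_unsigned_upper_bound_config config = {
 *       .min_subgroup_size = 32,
 *       .max_subgroup_size = 64,
 *       .max_work_group_invocations = 2048,
 *       .max_work_group_count = { 65535, 65535, 65535 },
 *       .max_work_group_size = { 1024, 1024, 64 },
 *    };
 *    struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
 *    nir_ssa_scalar s = { index_def, 0 };
 *    uint32_t ub = nir_unsigned_upper_bound(shader, ht, s, &config);
 *    if (ub <= UINT16_MAX)
 *       ... a 16-bit offset is known to be sufficient ...
 *    _mesa_hash_table_destroy(ht, NULL);
 */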
1151 uint32_t
1152 nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
1153 nir_ssa_scalar scalar,
1154 const nir_unsigned_upper_bound_config *config)
1155 {
1156 assert(scalar.def->bit_size <= 32);
1157
1158 if (nir_ssa_scalar_is_const(scalar))
1159 return nir_ssa_scalar_as_uint(scalar);
1160
1161 /* keys can't be 0, so we have to add 1 to the index */
1162 void *key = (void*)(((uintptr_t)(scalar.def->index + 1) << 4) | scalar.comp);
1163 struct hash_entry *he = _mesa_hash_table_search(range_ht, key);
1164 if (he != NULL)
1165 return (uintptr_t)he->data;
1166
1167 uint32_t max = bitmask(scalar.def->bit_size);
1168
1169 if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
1170 uint32_t res = max;
1171 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
1172 switch (intrin->intrinsic) {
1173 case nir_intrinsic_load_local_invocation_index:
1174 if (shader->info.cs.local_size_variable) {
1175 res = config->max_work_group_invocations - 1;
1176 } else {
1177 res = (shader->info.cs.local_size[0] *
1178 shader->info.cs.local_size[1] *
1179 shader->info.cs.local_size[2]) - 1u;
1180 }
1181 break;
1182 case nir_intrinsic_load_local_invocation_id:
1183 if (shader->info.cs.local_size_variable)
1184 res = config->max_work_group_size[scalar.comp] - 1u;
1185 else
1186 res = shader->info.cs.local_size[scalar.comp] - 1u;
1187 break;
1188 case nir_intrinsic_load_work_group_id:
1189 res = config->max_work_group_count[scalar.comp] - 1u;
1190 break;
1191 case nir_intrinsic_load_num_work_groups:
1192 res = config->max_work_group_count[scalar.comp];
1193 break;
1194 case nir_intrinsic_load_global_invocation_id:
1195 if (shader->info.cs.local_size_variable) {
1196 res = mul_clamp(config->max_work_group_size[scalar.comp],
1197 config->max_work_group_count[scalar.comp]) - 1u;
1198 } else {
1199 res = (shader->info.cs.local_size[scalar.comp] *
1200 config->max_work_group_count[scalar.comp]) - 1u;
1201 }
1202 break;
1203 case nir_intrinsic_load_subgroup_invocation:
1204 case nir_intrinsic_first_invocation:
1205 case nir_intrinsic_mbcnt_amd:
1206 res = config->max_subgroup_size - 1;
1207 break;
1208 case nir_intrinsic_load_subgroup_size:
1209 res = config->max_subgroup_size;
1210 break;
1211 case nir_intrinsic_load_subgroup_id:
1212 case nir_intrinsic_load_num_subgroups: {
1213 uint32_t work_group_size = config->max_work_group_invocations;
1214 if (!shader->info.cs.local_size_variable) {
1215 work_group_size = shader->info.cs.local_size[0] *
1216 shader->info.cs.local_size[1] *
1217 shader->info.cs.local_size[2];
1218 }
1219 res = (work_group_size + config->min_subgroup_size - 1) / config->min_subgroup_size;
1220 if (intrin->intrinsic == nir_intrinsic_load_subgroup_id)
1221 res--;
1222 break;
1223 }
1224 case nir_intrinsic_load_input: {
1225 if (shader->info.stage == MESA_SHADER_VERTEX && nir_src_is_const(intrin->src[0])) {
1226 nir_variable *var = lookup_input(shader, nir_intrinsic_base(intrin));
1227 if (var) {
1228 int loc = var->data.location - VERT_ATTRIB_GENERIC0;
1229 if (loc >= 0)
1230 res = config->vertex_attrib_max[loc];
1231 }
1232 }
1233 break;
1234 }
1235 case nir_intrinsic_reduce:
1236 case nir_intrinsic_inclusive_scan:
1237 case nir_intrinsic_exclusive_scan: {
1238 nir_op op = nir_intrinsic_reduction_op(intrin);
1239 if (op == nir_op_umin || op == nir_op_umax || op == nir_op_imin || op == nir_op_imax)
1240 res = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
1241 break;
1242 }
1243 case nir_intrinsic_read_first_invocation:
1244 case nir_intrinsic_read_invocation:
1245 case nir_intrinsic_shuffle:
1246 case nir_intrinsic_shuffle_xor:
1247 case nir_intrinsic_shuffle_up:
1248 case nir_intrinsic_shuffle_down:
1249 case nir_intrinsic_quad_broadcast:
1250 case nir_intrinsic_quad_swap_horizontal:
1251 case nir_intrinsic_quad_swap_vertical:
1252 case nir_intrinsic_quad_swap_diagonal:
1253 case nir_intrinsic_quad_swizzle_amd:
1254 case nir_intrinsic_masked_swizzle_amd:
1255 res = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
1256 break;
1257 case nir_intrinsic_write_invocation_amd: {
1258 uint32_t src0 = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[0].ssa, 0}, config);
1259 uint32_t src1 = nir_unsigned_upper_bound(shader, range_ht, (nir_ssa_scalar){intrin->src[1].ssa, 0}, config);
1260 res = MAX2(src0, src1);
1261 break;
1262 }
1263 default:
1264 break;
1265 }
1266 if (res != max)
1267 _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
1268 return res;
1269 }
1270
1271 if (scalar.def->parent_instr->type == nir_instr_type_phi) {
1272 bool cyclic = false;
1273 nir_foreach_phi_src(src, nir_instr_as_phi(scalar.def->parent_instr)) {
1274 if (nir_block_dominates(scalar.def->parent_instr->block, src->pred)) {
1275 cyclic = true;
1276 break;
1277 }
1278 }
1279
1280 uint32_t res = 0;
1281 if (cyclic) {
1282 _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)max);
1283
1284 struct set *visited = _mesa_pointer_set_create(NULL);
1285 nir_ssa_scalar defs[64];
1286 unsigned def_count = search_phi_bcsel(scalar, defs, 64, visited);
1287 _mesa_set_destroy(visited, NULL);
1288
1289 for (unsigned i = 0; i < def_count; i++)
1290 res = MAX2(res, nir_unsigned_upper_bound(shader, range_ht, defs[i], config));
1291 } else {
1292 nir_foreach_phi_src(src, nir_instr_as_phi(scalar.def->parent_instr)) {
1293 res = MAX2(res, nir_unsigned_upper_bound(
1294 shader, range_ht, (nir_ssa_scalar){src->src.ssa, 0}, config));
1295 }
1296 }
1297
1298 _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
1299 return res;
1300 }
1301
1302 if (nir_ssa_scalar_is_alu(scalar)) {
1303 nir_op op = nir_ssa_scalar_alu_op(scalar);
1304
1305 switch (op) {
1306 case nir_op_umin:
1307 case nir_op_imin:
1308 case nir_op_imax:
1309 case nir_op_umax:
1310 case nir_op_iand:
1311 case nir_op_ior:
1312 case nir_op_ixor:
1313 case nir_op_ishl:
1314 case nir_op_imul:
1315 case nir_op_ushr:
1316 case nir_op_ishr:
1317 case nir_op_iadd:
1318 case nir_op_umod:
1319 case nir_op_udiv:
1320 case nir_op_bcsel:
1321 case nir_op_b32csel:
1322 case nir_op_imax3:
1323 case nir_op_imin3:
1324 case nir_op_umax3:
1325 case nir_op_umin3:
1326 case nir_op_ubfe:
1327 case nir_op_bfm:
1328 case nir_op_f2u32:
1329 case nir_op_fmul:
1330 break;
1331 default:
1332 return max;
1333 }
1334
1335 uint32_t src0 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 0), config);
1336 uint32_t src1 = max, src2 = max;
1337 if (nir_op_infos[op].num_inputs > 1)
1338 src1 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 1), config);
1339 if (nir_op_infos[op].num_inputs > 2)
1340 src2 = nir_unsigned_upper_bound(shader, range_ht, nir_ssa_scalar_chase_alu_src(scalar, 2), config);
1341
1342 uint32_t res = max;
1343 switch (op) {
1344 case nir_op_umin:
1345 res = src0 < src1 ? src0 : src1;
1346 break;
1347 case nir_op_imin:
1348 case nir_op_imax:
1349 case nir_op_umax:
1350 res = src0 > src1 ? src0 : src1;
1351 break;
1352 case nir_op_iand:
1353 res = bitmask(util_last_bit64(src0)) & bitmask(util_last_bit64(src1));
1354 break;
1355 case nir_op_ior:
1356 case nir_op_ixor:
1357 res = bitmask(util_last_bit64(src0)) | bitmask(util_last_bit64(src1));
1358 break;
1359 case nir_op_ishl:
1360 if (util_last_bit64(src0) + src1 > scalar.def->bit_size)
1361 res = max; /* overflow */
1362 else
1363 res = src0 << MIN2(src1, scalar.def->bit_size - 1u);
1364 break;
1365 case nir_op_imul:
1366 if (src0 != 0 && (src0 * src1) / src0 != src1)
1367 res = max;
1368 else
1369 res = src0 * src1;
1370 break;
1371 case nir_op_ushr: {
1372 nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
1373 if (nir_ssa_scalar_is_const(src1_scalar))
1374 res = src0 >> nir_ssa_scalar_as_uint(src1_scalar);
1375 else
1376 res = src0;
1377 break;
1378 }
1379 case nir_op_ishr: {
1380 nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
1381 if (src0 <= 2147483647 && nir_ssa_scalar_is_const(src1_scalar))
1382 res = src0 >> nir_ssa_scalar_as_uint(src1_scalar);
1383 else
1384 res = src0;
1385 break;
1386 }
1387 case nir_op_iadd:
1388 if (src0 + src1 < src0)
1389 res = max; /* overflow */
1390 else
1391 res = src0 + src1;
1392 break;
1393 case nir_op_umod:
1394 res = src1 ? src1 - 1 : 0;
1395 break;
1396 case nir_op_udiv: {
1397 nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
1398 if (nir_ssa_scalar_is_const(src1_scalar))
1399 res = nir_ssa_scalar_as_uint(src1_scalar) ? src0 / nir_ssa_scalar_as_uint(src1_scalar) : 0;
1400 else
1401 res = src0;
1402 break;
1403 }
1404 case nir_op_bcsel:
1405 case nir_op_b32csel:
1406 res = src1 > src2 ? src1 : src2;
1407 break;
1408 case nir_op_imax3:
1409 case nir_op_imin3:
1410 case nir_op_umax3:
1411 src0 = src0 > src1 ? src0 : src1;
1412 res = src0 > src2 ? src0 : src2;
1413 break;
1414 case nir_op_umin3:
1415 src0 = src0 < src1 ? src0 : src1;
1416 res = src0 < src2 ? src0 : src2;
1417 break;
1418 case nir_op_ubfe:
1419 res = bitmask(MIN2(src2, scalar.def->bit_size));
1420 break;
1421 case nir_op_bfm: {
1422 nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(scalar, 1);
1423 if (nir_ssa_scalar_is_const(src1_scalar)) {
1424 src0 = MIN2(src0, 31);
1425 src1 = nir_ssa_scalar_as_uint(src1_scalar) & 0x1fu;
1426 res = bitmask(src0) << src1;
1427 } else {
1428 src0 = MIN2(src0, 31);
1429 src1 = MIN2(src1, 31);
1430 res = bitmask(MIN2(src0 + src1, 32));
1431 }
1432 break;
1433 }
1434 /* limited floating-point support for f2u32(fmul(load_input(), <constant>)) */
1435 case nir_op_f2u32:
1436 /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
1437 if (src0 < 0x7f800000u) {
1438 float val;
1439 memcpy(&val, &src0, 4);
1440 res = (uint32_t)val;
1441 }
1442 break;
1443 case nir_op_fmul:
1444 /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
1445 if (src0 < 0x7f800000u && src1 < 0x7f800000u) {
1446 float src0_f, src1_f;
1447 memcpy(&src0_f, &src0, 4);
1448 memcpy(&src1_f, &src1, 4);
1449 /* not a proper rounding-up multiplication, but should be good enough */
1450 float max_f = ceilf(src0_f) * ceilf(src1_f);
1451 memcpy(&res, &max_f, 4);
1452 }
1453 break;
1454 default:
1455 res = max;
1456 break;
1457 }
1458 _mesa_hash_table_insert(range_ht, key, (void*)(uintptr_t)res);
1459 return res;
1460 }
1461
1462 return max;
1463 }
1464
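/* Added explanatory note: this returns false only when the addition of
 * 'const_val' provably cannot wrap around 32 bits; otherwise it
 * conservatively returns true.  A hypothetical use (names are assumptions):
 *
 *    if (!nir_addition_might_overflow(shader, range_ht, base_scalar, 16,
 *                                     &config))
 *       ... fold the +16 into the addressing mode without a wrap check ...
 */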
1465 bool
1466 nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
1467 nir_ssa_scalar ssa, unsigned const_val,
1468 const nir_unsigned_upper_bound_config *config)
1469 {
1470 nir_op alu_op = nir_ssa_scalar_alu_op(ssa);
1471
1472 /* iadd(imul(a, #b), #c) */
1473 if (alu_op == nir_op_imul || alu_op == nir_op_ishl) {
1474 nir_ssa_scalar mul_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
1475 nir_ssa_scalar mul_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
1476 uint32_t stride = 1;
1477 if (nir_ssa_scalar_is_const(mul_src0))
1478 stride = nir_ssa_scalar_as_uint(mul_src0);
1479 else if (nir_ssa_scalar_is_const(mul_src1))
1480 stride = nir_ssa_scalar_as_uint(mul_src1);
1481
1482 if (alu_op == nir_op_ishl)
1483 stride = 1u << (stride % 32u);
1484
1485 if (!stride || const_val <= UINT32_MAX - (UINT32_MAX / stride * stride))
1486 return false;
1487 }
1488
1489 /* iadd(iand(a, #b), #c) */
1490 if (alu_op == nir_op_iand) {
1491 nir_ssa_scalar and_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
1492 nir_ssa_scalar and_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
1493 uint32_t mask = 0xffffffff;
1494 if (nir_ssa_scalar_is_const(and_src0))
1495 mask = nir_ssa_scalar_as_uint(and_src0);
1496 else if (nir_ssa_scalar_is_const(and_src1))
1497 mask = nir_ssa_scalar_as_uint(and_src1);
1498 if (mask == 0 || const_val < (1u << (ffs(mask) - 1)))
1499 return false;
1500 }
1501
1502 uint32_t ub = nir_unsigned_upper_bound(shader, range_ht, ssa, config);
1503 return const_val + ub < const_val;
1504 }
1505