[gcc.git] / gcc / internal-fn.c
1 /* Internal functions.
2 Copyright (C) 2011-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-ssanames.h"
31 #include "expmed.h"
32 #include "optabs.h"
33 #include "emit-rtl.h"
34 #include "diagnostic-core.h"
35 #include "alias.h"
36 #include "fold-const.h"
37 #include "internal-fn.h"
38 #include "stor-layout.h"
39 #include "flags.h"
40 #include "dojump.h"
41 #include "explow.h"
42 #include "calls.h"
43 #include "varasm.h"
44 #include "stmt.h"
45 #include "expr.h"
46 #include "ubsan.h"
47
48 /* The names of each internal function, indexed by function number. */
49 const char *const internal_fn_name_array[] = {
50 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
51 #include "internal-fn.def"
52 #undef DEF_INTERNAL_FN
53 "<invalid-fn>"
54 };
55
56 /* The ECF_* flags of each internal function, indexed by function number. */
57 const int internal_fn_flags_array[] = {
58 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
59 #include "internal-fn.def"
60 #undef DEF_INTERNAL_FN
61 0
62 };
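/* For illustration (a sketch; see internal-fn.def for the exact
   definitions): an entry such as
     DEF_INTERNAL_FN (LOAD_LANES, ECF_CONST | ECF_LEAF, NULL)
   in internal-fn.def expands to "LOAD_LANES", in the name array
   above and to ECF_CONST | ECF_LEAF, in the flags array.  */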
63
64 /* Fnspec of each internal function, indexed by function number. */
65 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
66
67 void
68 init_internal_fns ()
69 {
70 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
71 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
72 build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
73 #include "internal-fn.def"
74 #undef DEF_INTERNAL_FN
75 internal_fn_fnspec_array[IFN_LAST] = 0;
76 }
77
  78 /* ARRAY_TYPE is an array type whose elements are vectors.  Return the
  79    associated insn for load-lanes-style optab OPTAB.  The insn must exist.  */
80
81 static enum insn_code
82 get_multi_vector_move (tree array_type, convert_optab optab)
83 {
84 enum insn_code icode;
85 machine_mode imode;
86 machine_mode vmode;
87
88 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
89 imode = TYPE_MODE (array_type);
90 vmode = TYPE_MODE (TREE_TYPE (array_type));
91
92 icode = convert_optab_handler (optab, imode, vmode);
93 gcc_assert (icode != CODE_FOR_nothing);
94 return icode;
95 }
96
97 /* Expand LOAD_LANES call STMT. */
98
99 static void
100 expand_LOAD_LANES (gcall *stmt)
101 {
102 struct expand_operand ops[2];
103 tree type, lhs, rhs;
104 rtx target, mem;
105
106 lhs = gimple_call_lhs (stmt);
107 rhs = gimple_call_arg (stmt, 0);
108 type = TREE_TYPE (lhs);
109
110 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
111 mem = expand_normal (rhs);
112
113 gcc_assert (MEM_P (mem));
114 PUT_MODE (mem, TYPE_MODE (type));
115
116 create_output_operand (&ops[0], target, TYPE_MODE (type));
117 create_fixed_operand (&ops[1], mem);
118 expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
119 }
120
121 /* Expand STORE_LANES call STMT. */
122
123 static void
124 expand_STORE_LANES (gcall *stmt)
125 {
126 struct expand_operand ops[2];
127 tree type, lhs, rhs;
128 rtx target, reg;
129
130 lhs = gimple_call_lhs (stmt);
131 rhs = gimple_call_arg (stmt, 0);
132 type = TREE_TYPE (rhs);
133
134 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
135 reg = expand_normal (rhs);
136
137 gcc_assert (MEM_P (target));
138 PUT_MODE (target, TYPE_MODE (type));
139
140 create_fixed_operand (&ops[0], target);
141 create_input_operand (&ops[1], reg, TYPE_MODE (type));
142 expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
143 }
144
145 static void
146 expand_ANNOTATE (gcall *)
147 {
148 gcc_unreachable ();
149 }
150
151 /* This should get expanded in adjust_simduid_builtins. */
152
153 static void
154 expand_GOMP_SIMD_LANE (gcall *)
155 {
156 gcc_unreachable ();
157 }
158
159 /* This should get expanded in adjust_simduid_builtins. */
160
161 static void
162 expand_GOMP_SIMD_VF (gcall *)
163 {
164 gcc_unreachable ();
165 }
166
167 /* This should get expanded in adjust_simduid_builtins. */
168
169 static void
170 expand_GOMP_SIMD_LAST_LANE (gcall *)
171 {
172 gcc_unreachable ();
173 }
174
175 /* This should get expanded in adjust_simduid_builtins. */
176
177 static void
178 expand_GOMP_SIMD_ORDERED_START (gcall *)
179 {
180 gcc_unreachable ();
181 }
182
183 /* This should get expanded in adjust_simduid_builtins. */
184
185 static void
186 expand_GOMP_SIMD_ORDERED_END (gcall *)
187 {
188 gcc_unreachable ();
189 }
190
191 /* This should get expanded in the sanopt pass. */
192
193 static void
194 expand_UBSAN_NULL (gcall *)
195 {
196 gcc_unreachable ();
197 }
198
199 /* This should get expanded in the sanopt pass. */
200
201 static void
202 expand_UBSAN_BOUNDS (gcall *)
203 {
204 gcc_unreachable ();
205 }
206
207 /* This should get expanded in the sanopt pass. */
208
209 static void
210 expand_UBSAN_VPTR (gcall *)
211 {
212 gcc_unreachable ();
213 }
214
215 /* This should get expanded in the sanopt pass. */
216
217 static void
218 expand_UBSAN_OBJECT_SIZE (gcall *)
219 {
220 gcc_unreachable ();
221 }
222
223 /* This should get expanded in the sanopt pass. */
224
225 static void
226 expand_ASAN_CHECK (gcall *)
227 {
228 gcc_unreachable ();
229 }
230
231 /* This should get expanded in the tsan pass. */
232
233 static void
234 expand_TSAN_FUNC_EXIT (gcall *)
235 {
236 gcc_unreachable ();
237 }
238
 239 /* Helper function for expand_addsub_overflow.  Return 1 if ARG,
 240    interpreted as signed in its precision, is known to be always
 241    positive, 2 if ARG is known to be always negative, or 3 if ARG
 242    may be either positive or negative.  */
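/* For example (an illustrative sketch, in 8-bit precision): an ARG with
   known range [5, 100] yields 1, range [-100, -5] yields 2, and range
   [-5, 5] (or no range information at all) yields 3.  */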
243
244 static int
245 get_range_pos_neg (tree arg)
246 {
247 if (arg == error_mark_node)
248 return 3;
249
250 int prec = TYPE_PRECISION (TREE_TYPE (arg));
251 int cnt = 0;
252 if (TREE_CODE (arg) == INTEGER_CST)
253 {
254 wide_int w = wi::sext (arg, prec);
255 if (wi::neg_p (w))
256 return 2;
257 else
258 return 1;
259 }
260 while (CONVERT_EXPR_P (arg)
261 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
262 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
263 {
264 arg = TREE_OPERAND (arg, 0);
 265       /* A narrower value zero extended into a wider type
 266          will always result in a positive value.  */
267 if (TYPE_UNSIGNED (TREE_TYPE (arg))
268 && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
269 return 1;
270 prec = TYPE_PRECISION (TREE_TYPE (arg));
271 if (++cnt > 30)
272 return 3;
273 }
274
275 if (TREE_CODE (arg) != SSA_NAME)
276 return 3;
277 wide_int arg_min, arg_max;
278 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
279 {
280 gimple *g = SSA_NAME_DEF_STMT (arg);
281 if (is_gimple_assign (g)
282 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
283 {
284 tree t = gimple_assign_rhs1 (g);
285 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
286 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
287 {
288 if (TYPE_UNSIGNED (TREE_TYPE (t))
289 && TYPE_PRECISION (TREE_TYPE (t)) < prec)
290 return 1;
291 prec = TYPE_PRECISION (TREE_TYPE (t));
292 arg = t;
293 if (++cnt > 30)
294 return 3;
295 continue;
296 }
297 }
298 return 3;
299 }
300 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
301 {
302 /* For unsigned values, the "positive" range comes
303 below the "negative" range. */
304 if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
305 return 1;
306 if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
307 return 2;
308 }
309 else
310 {
311 if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
312 return 1;
313 if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
314 return 2;
315 }
316 return 3;
317 }
318
319 /* Return minimum precision needed to represent all values
320 of ARG in SIGNed integral type. */
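/* For example (an illustrative sketch): a value known to be in [0, 100]
   needs 7 bits as UNSIGNED but 8 bits as SIGNED (one extra bit for the
   sign), and a value in [-4, 3] needs 3 bits as SIGNED, following the
   wi::min_precision semantics used below.  */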
321
322 static int
323 get_min_precision (tree arg, signop sign)
324 {
325 int prec = TYPE_PRECISION (TREE_TYPE (arg));
326 int cnt = 0;
327 signop orig_sign = sign;
328 if (TREE_CODE (arg) == INTEGER_CST)
329 {
330 int p;
331 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
332 {
333 widest_int w = wi::to_widest (arg);
334 w = wi::ext (w, prec, sign);
335 p = wi::min_precision (w, sign);
336 }
337 else
338 p = wi::min_precision (arg, sign);
339 return MIN (p, prec);
340 }
341 while (CONVERT_EXPR_P (arg)
342 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
343 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
344 {
345 arg = TREE_OPERAND (arg, 0);
346 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
347 {
348 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
349 sign = UNSIGNED;
350 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
351 return prec + (orig_sign != sign);
352 prec = TYPE_PRECISION (TREE_TYPE (arg));
353 }
354 if (++cnt > 30)
355 return prec + (orig_sign != sign);
356 }
357 if (TREE_CODE (arg) != SSA_NAME)
358 return prec + (orig_sign != sign);
359 wide_int arg_min, arg_max;
360 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
361 {
362 gimple *g = SSA_NAME_DEF_STMT (arg);
363 if (is_gimple_assign (g)
364 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
365 {
366 tree t = gimple_assign_rhs1 (g);
367 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
368 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
369 {
370 arg = t;
371 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
372 {
373 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
374 sign = UNSIGNED;
375 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
376 return prec + (orig_sign != sign);
377 prec = TYPE_PRECISION (TREE_TYPE (arg));
378 }
379 if (++cnt > 30)
380 return prec + (orig_sign != sign);
381 continue;
382 }
383 }
384 return prec + (orig_sign != sign);
385 }
386 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
387 {
388 int p1 = wi::min_precision (arg_min, sign);
389 int p2 = wi::min_precision (arg_max, sign);
390 p1 = MAX (p1, p2);
391 prec = MIN (prec, p1);
392 }
393 else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
394 {
395 int p = wi::min_precision (arg_max, UNSIGNED);
396 prec = MIN (prec, p);
397 }
398 return prec + (orig_sign != sign);
399 }
400
 401 /* Helper for expand_*_overflow.  Store RES into the __real__ part
 402    of TARGET.  If RES has a larger MODE than the __real__ part of
 403    TARGET, set the __imag__ part to 1 if RES doesn't fit into it.  */
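/* (A note for context: the *_OVERFLOW internal functions return a
   complex integer pair whose __real__ part is the possibly wrapped
   result and whose __imag__ part is the overflow flag; this is what
   e.g. __builtin_add_overflow (a, b, &r) needs, r receiving the
   __real__ part and the boolean result the __imag__ part.)  */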
404
405 static void
406 expand_arith_overflow_result_store (tree lhs, rtx target,
407 machine_mode mode, rtx res)
408 {
409 machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
410 rtx lres = res;
411 if (tgtmode != mode)
412 {
413 rtx_code_label *done_label = gen_label_rtx ();
414 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
415 lres = convert_modes (tgtmode, mode, res, uns);
416 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
417 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
418 EQ, true, mode, NULL_RTX, NULL, done_label,
419 PROB_VERY_LIKELY);
420 write_complex_part (target, const1_rtx, true);
421 emit_label (done_label);
422 }
423 write_complex_part (target, lres, false);
424 }
425
426 /* Helper for expand_*_overflow. Store RES into TARGET. */
427
428 static void
429 expand_ubsan_result_store (rtx target, rtx res)
430 {
431 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
432 /* If this is a scalar in a register that is stored in a wider mode
433 than the declared mode, compute the result into its declared mode
434 and then convert to the wider mode. Our value is the computed
435 expression. */
436 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
437 else
438 emit_move_insn (target, res);
439 }
440
 441 /* Add addition/subtraction overflow checking to the statement STMT.
 442    CODE says whether the operation is + or -.  */
443
444 static void
445 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
446 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
447 bool uns1_p, bool is_ubsan)
448 {
449 rtx res, target = NULL_RTX;
450 tree fn;
451 rtx_code_label *done_label = gen_label_rtx ();
452 rtx_code_label *do_error = gen_label_rtx ();
453 do_pending_stack_adjust ();
454 rtx op0 = expand_normal (arg0);
455 rtx op1 = expand_normal (arg1);
456 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
457 int prec = GET_MODE_PRECISION (mode);
458 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
459 bool do_xor = false;
460
461 if (is_ubsan)
462 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
463
464 if (lhs)
465 {
466 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
467 if (!is_ubsan)
468 write_complex_part (target, const0_rtx, true);
469 }
470
471 /* We assume both operands and result have the same precision
472 here (GET_MODE_BITSIZE (mode)), S stands for signed type
473 with that precision, U for unsigned type with that precision,
474 sgn for unsigned most significant bit in that precision.
475 s1 is signed first operand, u1 is unsigned first operand,
476 s2 is signed second operand, u2 is unsigned second operand,
 477      sr is the signed result, ur is the unsigned result, and the
 478      following rules say how to compute the result (which is always the
 479      result of the operation on the operands as if both were unsigned,
 480      cast to the right signedness) and whether the operation overflowed.
481
482 s1 + s2 -> sr
483 res = (S) ((U) s1 + (U) s2)
484 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
485 s1 - s2 -> sr
486 res = (S) ((U) s1 - (U) s2)
 487 	ovf = s2 < 0 ? res < s1 : res > s1  (or jump on overflow)
488 u1 + u2 -> ur
489 res = u1 + u2
490 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
491 u1 - u2 -> ur
492 res = u1 - u2
493 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
494 s1 + u2 -> sr
495 res = (S) ((U) s1 + u2)
496 ovf = ((U) res ^ sgn) < u2
497 s1 + u2 -> ur
498 t1 = (S) (u2 ^ sgn)
499 t2 = s1 + t1
500 res = (U) t2 ^ sgn
501 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
502 s1 - u2 -> sr
503 res = (S) ((U) s1 - u2)
504 ovf = u2 > ((U) s1 ^ sgn)
505 s1 - u2 -> ur
506 res = (U) s1 - u2
507 ovf = s1 < 0 || u2 > (U) s1
508 u1 - s2 -> sr
509 res = u1 - (U) s2
510 ovf = u1 >= ((U) s2 ^ sgn)
511 u1 - s2 -> ur
512 t1 = u1 ^ sgn
513 t2 = t1 - (U) s2
514 res = t2 ^ sgn
515 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
516 s1 + s2 -> ur
517 res = (U) s1 + (U) s2
 518 	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
519 u1 + u2 -> sr
520 res = (S) (u1 + u2)
521 ovf = (U) res < u2 || res < 0
522 u1 - u2 -> sr
523 res = (S) (u1 - u2)
524 ovf = u1 >= u2 ? res < 0 : res >= 0
525 s1 - s2 -> ur
526 res = (U) s1 - (U) s2
527 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
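   /* A worked instance of the s1 + u2 -> sr rule above, in 8-bit
      precision (an illustrative sketch): for s1 = -1, u2 = 200 we get
      res = (S) ((U) -1 + 200) = (S) (455 & 0xff) = (S) 199 = -57, and
      ovf = ((U) res ^ sgn) < u2, i.e. (199 ^ 128) = 71 < 200, so
      overflow is reported; indeed the infinite precision result 199
      exceeds SCHAR_MAX.  */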
528
529 if (code == PLUS_EXPR && uns0_p && !uns1_p)
530 {
531 /* PLUS_EXPR is commutative, if operand signedness differs,
532 canonicalize to the first operand being signed and second
533 unsigned to simplify following code. */
534 std::swap (op0, op1);
535 std::swap (arg0, arg1);
536 uns0_p = false;
537 uns1_p = true;
538 }
539
540 /* u1 +- u2 -> ur */
541 if (uns0_p && uns1_p && unsr_p)
542 {
 543       /* Compute the operation.  At the RTL level, the operation is
 544 	 always performed unsigned.  */
545 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
546 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
547 rtx tem = op0;
 548       /* For PLUS_EXPR, the operation is commutative, so we can pick which
 549 	 operand to compare against.  For prec <= BITS_PER_WORD, preferring
 550 	 the REG operand over a CONST_INT seems better, because the CONST_INT
 551 	 might enlarge the instruction or CSE would need to figure out that
 552 	 we had already loaded it into a register.  For prec > BITS_PER_WORD,
 553 	 a CONST_INT might be more beneficial, as the multi-word comparison
 554 	 can then perhaps be simplified.  */
555 if (code == PLUS_EXPR
556 && (prec <= BITS_PER_WORD
557 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
558 : CONST_SCALAR_INT_P (op1)))
559 tem = op1;
560 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
561 true, mode, NULL_RTX, NULL, done_label,
562 PROB_VERY_LIKELY);
563 goto do_error_label;
564 }
565
566 /* s1 +- u2 -> sr */
567 if (!uns0_p && uns1_p && !unsr_p)
568 {
 569       /* Compute the operation.  At the RTL level, the operation is
 570 	 always performed unsigned.  */
571 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
572 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
573 rtx tem = expand_binop (mode, add_optab,
574 code == PLUS_EXPR ? res : op0, sgn,
575 NULL_RTX, false, OPTAB_LIB_WIDEN);
576 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
577 done_label, PROB_VERY_LIKELY);
578 goto do_error_label;
579 }
580
581 /* s1 + u2 -> ur */
582 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
583 {
584 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
585 OPTAB_LIB_WIDEN);
586 /* As we've changed op1, we have to avoid using the value range
587 for the original argument. */
588 arg1 = error_mark_node;
589 do_xor = true;
590 goto do_signed;
591 }
592
593 /* u1 - s2 -> ur */
594 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
595 {
596 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
597 OPTAB_LIB_WIDEN);
598 /* As we've changed op0, we have to avoid using the value range
599 for the original argument. */
600 arg0 = error_mark_node;
601 do_xor = true;
602 goto do_signed;
603 }
604
605 /* s1 - u2 -> ur */
606 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
607 {
 608       /* Compute the operation.  At the RTL level, the subtraction is
 609 	 always performed unsigned.  */
610 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
611 OPTAB_LIB_WIDEN);
612 int pos_neg = get_range_pos_neg (arg0);
613 if (pos_neg == 2)
 614 	/* If ARG0 is known to be always negative, this always overflows.  */
615 emit_jump (do_error);
616 else if (pos_neg == 3)
617 /* If ARG0 is not known to be always positive, check at runtime. */
618 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
619 NULL, do_error, PROB_VERY_UNLIKELY);
620 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
621 done_label, PROB_VERY_LIKELY);
622 goto do_error_label;
623 }
624
625 /* u1 - s2 -> sr */
626 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
627 {
 628       /* Compute the operation.  At the RTL level, the subtraction is
 629 	 always performed unsigned.  */
630 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
631 OPTAB_LIB_WIDEN);
632 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
633 OPTAB_LIB_WIDEN);
634 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
635 done_label, PROB_VERY_LIKELY);
636 goto do_error_label;
637 }
638
639 /* u1 + u2 -> sr */
640 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
641 {
 642       /* Compute the operation.  At the RTL level, the addition is
 643 	 always performed unsigned.  */
644 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
645 OPTAB_LIB_WIDEN);
646 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
647 NULL, do_error, PROB_VERY_UNLIKELY);
648 rtx tem = op1;
 649       /* The operation is commutative, so we can pick which operand to
 650 	 compare against.  For prec <= BITS_PER_WORD, preferring the REG
 651 	 operand over a CONST_INT seems better, because the CONST_INT might
 652 	 enlarge the instruction or CSE would need to figure out that we had
 653 	 already loaded it into a register.  For prec > BITS_PER_WORD, a
 654 	 CONST_INT might be more beneficial, as the multi-word comparison
 655 	 can then perhaps be simplified.  */
656 if (prec <= BITS_PER_WORD
657 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
658 : CONST_SCALAR_INT_P (op0))
659 tem = op0;
660 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
661 done_label, PROB_VERY_LIKELY);
662 goto do_error_label;
663 }
664
665 /* s1 +- s2 -> ur */
666 if (!uns0_p && !uns1_p && unsr_p)
667 {
 668       /* Compute the operation.  At the RTL level, the operation is
 669 	 always performed unsigned.  */
670 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
671 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
672 int pos_neg = get_range_pos_neg (arg1);
673 if (code == PLUS_EXPR)
674 {
675 int pos_neg0 = get_range_pos_neg (arg0);
676 if (pos_neg0 != 3 && pos_neg == 3)
677 {
678 std::swap (op0, op1);
679 pos_neg = pos_neg0;
680 }
681 }
682 rtx tem;
683 if (pos_neg != 3)
684 {
685 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
686 ? and_optab : ior_optab,
687 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
688 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
689 NULL, done_label, PROB_VERY_LIKELY);
690 }
691 else
692 {
693 rtx_code_label *do_ior_label = gen_label_rtx ();
694 do_compare_rtx_and_jump (op1, const0_rtx,
695 code == MINUS_EXPR ? GE : LT, false, mode,
696 NULL_RTX, NULL, do_ior_label,
697 PROB_EVEN);
698 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
699 OPTAB_LIB_WIDEN);
700 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
701 NULL, done_label, PROB_VERY_LIKELY);
702 emit_jump (do_error);
703 emit_label (do_ior_label);
704 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
705 OPTAB_LIB_WIDEN);
706 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
707 NULL, done_label, PROB_VERY_LIKELY);
708 }
709 goto do_error_label;
710 }
711
712 /* u1 - u2 -> sr */
713 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
714 {
 715       /* Compute the operation.  At the RTL level, the subtraction is
 716 	 always performed unsigned.  */
717 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
718 OPTAB_LIB_WIDEN);
719 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
720 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
721 op0_geu_op1, PROB_EVEN);
722 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
723 NULL, done_label, PROB_VERY_LIKELY);
724 emit_jump (do_error);
725 emit_label (op0_geu_op1);
726 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
727 NULL, done_label, PROB_VERY_LIKELY);
728 goto do_error_label;
729 }
730
731 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
732
733 /* s1 +- s2 -> sr */
734 do_signed: ;
735 enum insn_code icode;
736 icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
737 if (icode != CODE_FOR_nothing)
738 {
739 struct expand_operand ops[4];
740 rtx_insn *last = get_last_insn ();
741
742 res = gen_reg_rtx (mode);
743 create_output_operand (&ops[0], res, mode);
744 create_input_operand (&ops[1], op0, mode);
745 create_input_operand (&ops[2], op1, mode);
746 create_fixed_operand (&ops[3], do_error);
747 if (maybe_expand_insn (icode, 4, ops))
748 {
749 last = get_last_insn ();
750 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
751 && JUMP_P (last)
752 && any_condjump_p (last)
753 && !find_reg_note (last, REG_BR_PROB, 0))
754 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
755 emit_jump (done_label);
756 }
757 else
758 {
759 delete_insns_since (last);
760 icode = CODE_FOR_nothing;
761 }
762 }
763
764 if (icode == CODE_FOR_nothing)
765 {
766 rtx_code_label *sub_check = gen_label_rtx ();
767 int pos_neg = 3;
768
 769       /* Compute the operation.  At the RTL level, the operation is
 770 	 always performed unsigned.  */
771 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
772 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
773
 774       /* If we can prove that one of the arguments (for MINUS_EXPR only
 775 	 the second operand, as subtraction is not commutative) is always
 776 	 non-negative or always negative, we can do just one comparison
 777 	 and conditional jump at runtime instead of two (three are present
 778 	 in the emitted code).  If one of the arguments is a CONST_INT, all
 779 	 we need is to make sure it is op1, then the first
 780 	 do_compare_rtx_and_jump will simply be folded away.  Otherwise try
 781 	 to use range info if available.  */
782 if (code == PLUS_EXPR && CONST_INT_P (op0))
783 std::swap (op0, op1);
784 else if (CONST_INT_P (op1))
785 ;
786 else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
787 {
788 pos_neg = get_range_pos_neg (arg0);
789 if (pos_neg != 3)
790 std::swap (op0, op1);
791 }
792 if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
793 pos_neg = get_range_pos_neg (arg1);
794
 795       /* If op1 is negative, we have to use a different check.  */
796 if (pos_neg == 3)
797 do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
798 NULL, sub_check, PROB_EVEN);
799
800 /* Compare the result of the operation with one of the operands. */
801 if (pos_neg & 1)
802 do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
803 false, mode, NULL_RTX, NULL, done_label,
804 PROB_VERY_LIKELY);
805
 806       /* If we get here, we have to signal the error.  */
807 if (pos_neg == 3)
808 {
809 emit_jump (do_error);
810
811 emit_label (sub_check);
812 }
813
 814       /* We have k = a +- b for b < 0 here: k <= a must hold for +, k >= a for -.  */
815 if (pos_neg & 2)
816 do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
817 false, mode, NULL_RTX, NULL, done_label,
818 PROB_VERY_LIKELY);
819 }
820
821 do_error_label:
822 emit_label (do_error);
823 if (is_ubsan)
824 {
825 /* Expand the ubsan builtin call. */
826 push_temp_slots ();
827 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
828 arg0, arg1);
829 expand_normal (fn);
830 pop_temp_slots ();
831 do_pending_stack_adjust ();
832 }
833 else if (lhs)
834 write_complex_part (target, const1_rtx, true);
835
836 /* We're done. */
837 emit_label (done_label);
838
839 if (lhs)
840 {
841 if (is_ubsan)
842 expand_ubsan_result_store (target, res);
843 else
844 {
845 if (do_xor)
846 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
847 OPTAB_LIB_WIDEN);
848
849 expand_arith_overflow_result_store (lhs, target, mode, res);
850 }
851 }
852 }
853
 854 /* Add negation overflow checking to the statement STMT.  */
855
856 static void
857 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
858 {
859 rtx res, op1;
860 tree fn;
861 rtx_code_label *done_label, *do_error;
862 rtx target = NULL_RTX;
863
864 done_label = gen_label_rtx ();
865 do_error = gen_label_rtx ();
866
867 do_pending_stack_adjust ();
868 op1 = expand_normal (arg1);
869
870 machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
871 if (lhs)
872 {
873 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
874 if (!is_ubsan)
875 write_complex_part (target, const0_rtx, true);
876 }
877
878 enum insn_code icode = optab_handler (negv3_optab, mode);
879 if (icode != CODE_FOR_nothing)
880 {
881 struct expand_operand ops[3];
882 rtx_insn *last = get_last_insn ();
883
884 res = gen_reg_rtx (mode);
885 create_output_operand (&ops[0], res, mode);
886 create_input_operand (&ops[1], op1, mode);
887 create_fixed_operand (&ops[2], do_error);
888 if (maybe_expand_insn (icode, 3, ops))
889 {
890 last = get_last_insn ();
891 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
892 && JUMP_P (last)
893 && any_condjump_p (last)
894 && !find_reg_note (last, REG_BR_PROB, 0))
895 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
896 emit_jump (done_label);
897 }
898 else
899 {
900 delete_insns_since (last);
901 icode = CODE_FOR_nothing;
902 }
903 }
904
905 if (icode == CODE_FOR_nothing)
906 {
 907       /* Compute the operation.  At the RTL level, the negation is
 908 	 always performed unsigned.  */
909 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
910
911 /* Compare the operand with the most negative value. */
912 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
913 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
914 done_label, PROB_VERY_LIKELY);
915 }
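  /* E.g. (an illustrative sketch) in 8-bit precision: only op1 == -128
     reaches do_error below, since -(-128) == 128 is not representable;
     every other value negates without overflow.  */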
916
917 emit_label (do_error);
918 if (is_ubsan)
919 {
920 /* Expand the ubsan builtin call. */
921 push_temp_slots ();
922 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
923 arg1, NULL_TREE);
924 expand_normal (fn);
925 pop_temp_slots ();
926 do_pending_stack_adjust ();
927 }
928 else if (lhs)
929 write_complex_part (target, const1_rtx, true);
930
931 /* We're done. */
932 emit_label (done_label);
933
934 if (lhs)
935 {
936 if (is_ubsan)
937 expand_ubsan_result_store (target, res);
938 else
939 expand_arith_overflow_result_store (lhs, target, mode, res);
940 }
941 }
942
943 /* Add mul overflow checking to the statement STMT. */
944
945 static void
946 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
947 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
948 {
949 rtx res, op0, op1;
950 tree fn, type;
951 rtx_code_label *done_label, *do_error;
952 rtx target = NULL_RTX;
953 signop sign;
954 enum insn_code icode;
955
956 done_label = gen_label_rtx ();
957 do_error = gen_label_rtx ();
958
959 do_pending_stack_adjust ();
960 op0 = expand_normal (arg0);
961 op1 = expand_normal (arg1);
962
963 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
964 bool uns = unsr_p;
965 if (lhs)
966 {
967 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
968 if (!is_ubsan)
969 write_complex_part (target, const0_rtx, true);
970 }
971
972 if (is_ubsan)
973 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
974
975 /* We assume both operands and result have the same precision
976 here (GET_MODE_BITSIZE (mode)), S stands for signed type
977 with that precision, U for unsigned type with that precision,
978 sgn for unsigned most significant bit in that precision.
979 s1 is signed first operand, u1 is unsigned first operand,
980 s2 is signed second operand, u2 is unsigned second operand,
 981      sr is the signed result, ur is the unsigned result, and the
 982      following rules say how to compute the result (which is always the
 983      result of the operation on the operands as if both were unsigned,
 984      cast to the right signedness) and whether the operation overflowed.
985 main_ovf (false) stands for jump on signed multiplication
986 overflow or the main algorithm with uns == false.
987 main_ovf (true) stands for jump on unsigned multiplication
988 overflow or the main algorithm with uns == true.
989
990 s1 * s2 -> sr
991 res = (S) ((U) s1 * (U) s2)
992 ovf = main_ovf (false)
993 u1 * u2 -> ur
994 res = u1 * u2
995 ovf = main_ovf (true)
996 s1 * u2 -> ur
997 res = (U) s1 * u2
998 ovf = (s1 < 0 && u2) || main_ovf (true)
999 u1 * u2 -> sr
1000 res = (S) (u1 * u2)
1001 ovf = res < 0 || main_ovf (true)
1002 s1 * u2 -> sr
1003 res = (S) ((U) s1 * u2)
1004 ovf = (S) u2 >= 0 ? main_ovf (false)
1005 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1006 s1 * s2 -> ur
1007 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1008 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1009 res = t1 * t2
1010 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
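   /* A worked instance of the u1 * u2 -> sr rule above, in 8-bit
      precision (an illustrative sketch): for u1 = 20, u2 = 10 the
      unsigned product 200 fits, so main_ovf (true) does not trigger,
      but res = (S) 200 = -56 < 0, so overflow is still reported;
      indeed 200 exceeds SCHAR_MAX.  */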
1011
1012 if (uns0_p && !uns1_p)
1013 {
1014 /* Multiplication is commutative, if operand signedness differs,
1015 canonicalize to the first operand being signed and second
1016 unsigned to simplify following code. */
1017 std::swap (op0, op1);
1018 std::swap (arg0, arg1);
1019 uns0_p = false;
1020 uns1_p = true;
1021 }
1022
1023 int pos_neg0 = get_range_pos_neg (arg0);
1024 int pos_neg1 = get_range_pos_neg (arg1);
1025
1026 /* s1 * u2 -> ur */
1027 if (!uns0_p && uns1_p && unsr_p)
1028 {
1029 switch (pos_neg0)
1030 {
1031 case 1:
1032 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1033 goto do_main;
1034 case 2:
1035 /* If s1 is negative, avoid the main code, just multiply and
1036 signal overflow if op1 is not 0. */
1037 struct separate_ops ops;
1038 ops.code = MULT_EXPR;
1039 ops.type = TREE_TYPE (arg1);
1040 ops.op0 = make_tree (ops.type, op0);
1041 ops.op1 = make_tree (ops.type, op1);
1042 ops.op2 = NULL_TREE;
1043 ops.location = loc;
1044 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1045 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1046 NULL, done_label, PROB_VERY_LIKELY);
1047 goto do_error_label;
1048 case 3:
1049 rtx_code_label *do_main_label;
1050 do_main_label = gen_label_rtx ();
1051 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1052 NULL, do_main_label, PROB_VERY_LIKELY);
1053 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1054 NULL, do_main_label, PROB_VERY_LIKELY);
1055 write_complex_part (target, const1_rtx, true);
1056 emit_label (do_main_label);
1057 goto do_main;
1058 default:
1059 gcc_unreachable ();
1060 }
1061 }
1062
1063 /* u1 * u2 -> sr */
1064 if (uns0_p && uns1_p && !unsr_p)
1065 {
1066 uns = true;
1067 /* Rest of handling of this case after res is computed. */
1068 goto do_main;
1069 }
1070
1071 /* s1 * u2 -> sr */
1072 if (!uns0_p && uns1_p && !unsr_p)
1073 {
1074 switch (pos_neg1)
1075 {
1076 case 1:
1077 goto do_main;
1078 case 2:
1079 	  /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1080 	     avoid the main code; just multiply and signal overflow
1081 	     unless the product is 0 * u2 or -1 * ((U) Smin).  */
1082 struct separate_ops ops;
1083 ops.code = MULT_EXPR;
1084 ops.type = TREE_TYPE (arg1);
1085 ops.op0 = make_tree (ops.type, op0);
1086 ops.op1 = make_tree (ops.type, op1);
1087 ops.op2 = NULL_TREE;
1088 ops.location = loc;
1089 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1090 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1091 NULL, done_label, PROB_VERY_LIKELY);
1092 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1093 NULL, do_error, PROB_VERY_UNLIKELY);
1094 int prec;
1095 prec = GET_MODE_PRECISION (mode);
1096 rtx sgn;
1097 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1098 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1099 NULL, done_label, PROB_VERY_LIKELY);
1100 goto do_error_label;
1101 case 3:
1102 /* Rest of handling of this case after res is computed. */
1103 goto do_main;
1104 default:
1105 gcc_unreachable ();
1106 }
1107 }
1108
1109 /* s1 * s2 -> ur */
1110 if (!uns0_p && !uns1_p && unsr_p)
1111 {
1112 rtx tem, tem2;
1113 switch (pos_neg0 | pos_neg1)
1114 {
1115 case 1: /* Both operands known to be non-negative. */
1116 goto do_main;
1117 case 2: /* Both operands known to be negative. */
1118 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1119 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1120 /* Avoid looking at arg0/arg1 ranges, as we've changed
1121 the arguments. */
1122 arg0 = error_mark_node;
1123 arg1 = error_mark_node;
1124 goto do_main;
1125 case 3:
1126 if ((pos_neg0 ^ pos_neg1) == 3)
1127 {
1128 /* If one operand is known to be negative and the other
1129 non-negative, this overflows always, unless the non-negative
1130 one is 0. Just do normal multiply and set overflow
1131 unless one of the operands is 0. */
1132 struct separate_ops ops;
1133 ops.code = MULT_EXPR;
1134 ops.type
1135 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1136 1);
1137 ops.op0 = make_tree (ops.type, op0);
1138 ops.op1 = make_tree (ops.type, op1);
1139 ops.op2 = NULL_TREE;
1140 ops.location = loc;
1141 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1142 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1143 OPTAB_LIB_WIDEN);
1144 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
1145 NULL_RTX, NULL, done_label,
1146 PROB_VERY_LIKELY);
1147 goto do_error_label;
1148 }
1149 /* The general case, do all the needed comparisons at runtime. */
1150 rtx_code_label *do_main_label, *after_negate_label;
1151 rtx rop0, rop1;
1152 rop0 = gen_reg_rtx (mode);
1153 rop1 = gen_reg_rtx (mode);
1154 emit_move_insn (rop0, op0);
1155 emit_move_insn (rop1, op1);
1156 op0 = rop0;
1157 op1 = rop1;
1158 do_main_label = gen_label_rtx ();
1159 after_negate_label = gen_label_rtx ();
1160 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1161 OPTAB_LIB_WIDEN);
1162 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1163 NULL, after_negate_label, PROB_VERY_LIKELY);
1164 /* Both arguments negative here, negate them and continue with
1165 normal unsigned overflow checking multiplication. */
1166 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1167 NULL_RTX, false));
1168 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1169 NULL_RTX, false));
1170 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1171 the arguments. */
1172 arg0 = error_mark_node;
1173 arg1 = error_mark_node;
1174 emit_jump (do_main_label);
1175 emit_label (after_negate_label);
1176 tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1177 OPTAB_LIB_WIDEN);
1178 do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
1179 NULL, do_main_label, PROB_VERY_LIKELY);
1180 	  /* One argument is negative here, the other positive.  This
1181 	     always overflows, unless one of the arguments is 0.  But
1182 	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow whatever s1
1183 	     is, so we can keep the do_main code ORing in overflow as is.  */
1184 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
1185 NULL, do_main_label, PROB_VERY_LIKELY);
1186 write_complex_part (target, const1_rtx, true);
1187 emit_label (do_main_label);
1188 goto do_main;
1189 default:
1190 gcc_unreachable ();
1191 }
1192 }
1193
1194 do_main:
1195 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1196 sign = uns ? UNSIGNED : SIGNED;
1197 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1198 if (icode != CODE_FOR_nothing)
1199 {
1200 struct expand_operand ops[4];
1201 rtx_insn *last = get_last_insn ();
1202
1203 res = gen_reg_rtx (mode);
1204 create_output_operand (&ops[0], res, mode);
1205 create_input_operand (&ops[1], op0, mode);
1206 create_input_operand (&ops[2], op1, mode);
1207 create_fixed_operand (&ops[3], do_error);
1208 if (maybe_expand_insn (icode, 4, ops))
1209 {
1210 last = get_last_insn ();
1211 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1212 && JUMP_P (last)
1213 && any_condjump_p (last)
1214 && !find_reg_note (last, REG_BR_PROB, 0))
1215 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
1216 emit_jump (done_label);
1217 }
1218 else
1219 {
1220 delete_insns_since (last);
1221 icode = CODE_FOR_nothing;
1222 }
1223 }
1224
1225 if (icode == CODE_FOR_nothing)
1226 {
1227 struct separate_ops ops;
1228 int prec = GET_MODE_PRECISION (mode);
1229 machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
1230 ops.op0 = make_tree (type, op0);
1231 ops.op1 = make_tree (type, op1);
1232 ops.op2 = NULL_TREE;
1233 ops.location = loc;
1234 if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1235 && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
1236 {
1237 machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
1238 ops.code = WIDEN_MULT_EXPR;
1239 ops.type
1240 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1241
1242 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1243 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1244 NULL_RTX, uns);
1245 hipart = gen_lowpart (mode, hipart);
1246 res = gen_lowpart (mode, res);
1247 if (uns)
1248 /* For the unsigned multiplication, there was overflow if
1249 HIPART is non-zero. */
1250 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1251 NULL_RTX, NULL, done_label,
1252 PROB_VERY_LIKELY);
1253 else
1254 {
1255 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1256 NULL_RTX, 0);
1257 	      /* RES is the low half of the double-width result, HIPART
1258 		 the high half.  There was overflow if HIPART is
1259 		 different from RES < 0 ? -1 : 0.  */
1260 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1261 NULL_RTX, NULL, done_label,
1262 PROB_VERY_LIKELY);
1263 }
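	  /* E.g. (an illustrative sketch) for 8-bit signed operands
	     multiplied in a 16-bit wmode: 16 * 16 = 256 = 0x0100 gives
	     res = 0x00 and hipart = 0x01, while RES < 0 ? -1 : 0 is 0,
	     so the mismatch correctly reports that 256 overflows the
	     8-bit result.  */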
1264 }
1265 else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
1266 {
1267 rtx_code_label *large_op0 = gen_label_rtx ();
1268 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1269 rtx_code_label *one_small_one_large = gen_label_rtx ();
1270 rtx_code_label *both_ops_large = gen_label_rtx ();
1271 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1272 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1273 rtx_code_label *do_overflow = gen_label_rtx ();
1274 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1275
1276 unsigned int hprec = GET_MODE_PRECISION (hmode);
1277 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1278 NULL_RTX, uns);
1279 hipart0 = gen_lowpart (hmode, hipart0);
1280 rtx lopart0 = gen_lowpart (hmode, op0);
1281 rtx signbit0 = const0_rtx;
1282 if (!uns)
1283 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1284 NULL_RTX, 0);
1285 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1286 NULL_RTX, uns);
1287 hipart1 = gen_lowpart (hmode, hipart1);
1288 rtx lopart1 = gen_lowpart (hmode, op1);
1289 rtx signbit1 = const0_rtx;
1290 if (!uns)
1291 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1292 NULL_RTX, 0);
1293
1294 res = gen_reg_rtx (mode);
1295
1296 	  /* True if op0 (resp. op1) is known to be in the range of
1297 	     halfstype.  */
1298 bool op0_small_p = false;
1299 bool op1_small_p = false;
1300 	  /* True if op0 (resp. op1) is known to have all zeros or all ones
1301 	     in the upper half of its bits, but is not known to be
1302 	     op{0,1}_small_p.  */
1303 bool op0_medium_p = false;
1304 bool op1_medium_p = false;
1305 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1306 nonnegative, 1 if unknown. */
1307 int op0_sign = 1;
1308 int op1_sign = 1;
1309
1310 if (pos_neg0 == 1)
1311 op0_sign = 0;
1312 else if (pos_neg0 == 2)
1313 op0_sign = -1;
1314 if (pos_neg1 == 1)
1315 op1_sign = 0;
1316 else if (pos_neg1 == 2)
1317 op1_sign = -1;
1318
1319 unsigned int mprec0 = prec;
1320 if (arg0 != error_mark_node)
1321 mprec0 = get_min_precision (arg0, sign);
1322 if (mprec0 <= hprec)
1323 op0_small_p = true;
1324 else if (!uns && mprec0 <= hprec + 1)
1325 op0_medium_p = true;
1326 unsigned int mprec1 = prec;
1327 if (arg1 != error_mark_node)
1328 mprec1 = get_min_precision (arg1, sign);
1329 if (mprec1 <= hprec)
1330 op1_small_p = true;
1331 else if (!uns && mprec1 <= hprec + 1)
1332 op1_medium_p = true;
1333
1334 int smaller_sign = 1;
1335 int larger_sign = 1;
1336 if (op0_small_p)
1337 {
1338 smaller_sign = op0_sign;
1339 larger_sign = op1_sign;
1340 }
1341 else if (op1_small_p)
1342 {
1343 smaller_sign = op1_sign;
1344 larger_sign = op0_sign;
1345 }
1346 else if (op0_sign == op1_sign)
1347 {
1348 smaller_sign = op0_sign;
1349 larger_sign = op0_sign;
1350 }
1351
1352 if (!op0_small_p)
1353 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1354 NULL_RTX, NULL, large_op0,
1355 PROB_UNLIKELY);
1356
1357 if (!op1_small_p)
1358 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1359 NULL_RTX, NULL, small_op0_large_op1,
1360 PROB_UNLIKELY);
1361
1362 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1363 hmode to mode, the multiplication will never overflow. We can
1364 do just one hmode x hmode => mode widening multiplication. */
1365 rtx lopart0s = lopart0, lopart1s = lopart1;
1366 if (GET_CODE (lopart0) == SUBREG)
1367 {
1368 lopart0s = shallow_copy_rtx (lopart0);
1369 SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1370 SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1371 }
1372 if (GET_CODE (lopart1) == SUBREG)
1373 {
1374 lopart1s = shallow_copy_rtx (lopart1);
1375 SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1376 SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1377 }
1378 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1379 ops.op0 = make_tree (halfstype, lopart0s);
1380 ops.op1 = make_tree (halfstype, lopart1s);
1381 ops.code = WIDEN_MULT_EXPR;
1382 ops.type = type;
1383 rtx thisres
1384 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1385 emit_move_insn (res, thisres);
1386 emit_jump (done_label);
1387
1388 emit_label (small_op0_large_op1);
1389
1390 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1391 but op1 is not, just swap the arguments and handle it as op1
1392 sign/zero extended, op0 not. */
1393 rtx larger = gen_reg_rtx (mode);
1394 rtx hipart = gen_reg_rtx (hmode);
1395 rtx lopart = gen_reg_rtx (hmode);
1396 emit_move_insn (larger, op1);
1397 emit_move_insn (hipart, hipart1);
1398 emit_move_insn (lopart, lopart0);
1399 emit_jump (one_small_one_large);
1400
1401 emit_label (large_op0);
1402
1403 if (!op1_small_p)
1404 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1405 NULL_RTX, NULL, both_ops_large,
1406 PROB_UNLIKELY);
1407
1408 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1409 but op0 is not, prepare larger, hipart and lopart pseudos and
1410 handle it together with small_op0_large_op1. */
1411 emit_move_insn (larger, op0);
1412 emit_move_insn (hipart, hipart0);
1413 emit_move_insn (lopart, lopart1);
1414
1415 emit_label (one_small_one_large);
1416
1417 /* lopart is the low part of the operand that is sign extended
1418 to mode, larger is the other operand, hipart is the
1419 high part of larger and lopart0 and lopart1 are the low parts
1420 of both operands.
1421 We perform lopart0 * lopart1 and lopart * hipart widening
1422 multiplications. */
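	  /* A sketch of the algebra behind the steps that follow: with
	     h = hprec, larger == (hipart << h) + (unsigned low half of
	     larger), so
	       smaller * larger == ((smaller * hipart) << h)
				   + smaller * (low half of larger).
	     The code computes the two half-width widening products
	     lo0xlo1 and loxhi, folds the high half of lo0xlo1 and the
	     signedness corrections into loxhi, and then only needs to
	     check that loxhi fits into hmode for the final
	     (loxhi << h) | (low half of lo0xlo1) not to overflow mode.  */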
1423 tree halfutype = build_nonstandard_integer_type (hprec, 1);
1424 ops.op0 = make_tree (halfutype, lopart0);
1425 ops.op1 = make_tree (halfutype, lopart1);
1426 rtx lo0xlo1
1427 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1428
1429 ops.op0 = make_tree (halfutype, lopart);
1430 ops.op1 = make_tree (halfutype, hipart);
1431 rtx loxhi = gen_reg_rtx (mode);
1432 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1433 emit_move_insn (loxhi, tem);
1434
1435 if (!uns)
1436 {
1437 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1438 if (larger_sign == 0)
1439 emit_jump (after_hipart_neg);
1440 else if (larger_sign != -1)
1441 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1442 NULL_RTX, NULL, after_hipart_neg,
1443 PROB_EVEN);
1444
1445 tem = convert_modes (mode, hmode, lopart, 1);
1446 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1447 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1448 1, OPTAB_DIRECT);
1449 emit_move_insn (loxhi, tem);
1450
1451 emit_label (after_hipart_neg);
1452
1453 /* if (lopart < 0) loxhi -= larger; */
1454 if (smaller_sign == 0)
1455 emit_jump (after_lopart_neg);
1456 else if (smaller_sign != -1)
1457 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1458 NULL_RTX, NULL, after_lopart_neg,
1459 PROB_EVEN);
1460
1461 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1462 1, OPTAB_DIRECT);
1463 emit_move_insn (loxhi, tem);
1464
1465 emit_label (after_lopart_neg);
1466 }
1467
1468 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1469 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1470 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1471 1, OPTAB_DIRECT);
1472 emit_move_insn (loxhi, tem);
1473
1474 	  /* if (loxhi >> (bitsize / 2)
1475 	      == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
1476 	     if (loxhi >> (bitsize / 2) == 0)  (if uns).  */
1477 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1478 NULL_RTX, 0);
1479 hipartloxhi = gen_lowpart (hmode, hipartloxhi);
1480 rtx signbitloxhi = const0_rtx;
1481 if (!uns)
1482 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1483 gen_lowpart (hmode, loxhi),
1484 hprec - 1, NULL_RTX, 0);
1485
1486 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1487 NULL_RTX, NULL, do_overflow,
1488 PROB_VERY_UNLIKELY);
1489
1490 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1491 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1492 NULL_RTX, 1);
1493 tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);
1494
1495 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1496 1, OPTAB_DIRECT);
1497 if (tem != res)
1498 emit_move_insn (res, tem);
1499 emit_jump (done_label);
1500
1501 emit_label (both_ops_large);
1502
1503 	  /* If both operands are large (not sign (!uns) or zero (uns)
1504 	     extended from hmode), then perform the full multiplication,
1505 	     which will be the result of the operation.
1506 	     The only cases which don't overflow are, for signed
1507 	     multiplication, some cases where both hipart0 and hipart1 are
1508 	     0 or -1.  For unsigned multiplication, when the high parts are
1509 	     both non-zero, this always overflows.  */
1510 ops.code = MULT_EXPR;
1511 ops.op0 = make_tree (type, op0);
1512 ops.op1 = make_tree (type, op1);
1513 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1514 emit_move_insn (res, tem);
1515
1516 if (!uns)
1517 {
1518 if (!op0_medium_p)
1519 {
1520 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1521 NULL_RTX, 1, OPTAB_DIRECT);
1522 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1523 NULL_RTX, NULL, do_error,
1524 PROB_VERY_UNLIKELY);
1525 }
1526
1527 if (!op1_medium_p)
1528 {
1529 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1530 NULL_RTX, 1, OPTAB_DIRECT);
1531 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1532 NULL_RTX, NULL, do_error,
1533 PROB_VERY_UNLIKELY);
1534 }
1535
1536 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1537 the same, overflow happened if res is negative, if they are
1538 different, overflow happened if res is positive. */
1539 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1540 emit_jump (hipart_different);
1541 else if (op0_sign == 1 || op1_sign == 1)
1542 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1543 NULL_RTX, NULL, hipart_different,
1544 PROB_EVEN);
1545
1546 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
1547 NULL_RTX, NULL, do_error,
1548 PROB_VERY_UNLIKELY);
1549 emit_jump (done_label);
1550
1551 emit_label (hipart_different);
1552
1553 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1554 NULL_RTX, NULL, do_error,
1555 PROB_VERY_UNLIKELY);
1556 emit_jump (done_label);
1557 }
1558
1559 emit_label (do_overflow);
1560
1561 /* Overflow, do full multiplication and fallthru into do_error. */
1562 ops.op0 = make_tree (type, op0);
1563 ops.op1 = make_tree (type, op1);
1564 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1565 emit_move_insn (res, tem);
1566 }
1567 else
1568 {
1569 gcc_assert (!is_ubsan);
1570 ops.code = MULT_EXPR;
1571 ops.type = type;
1572 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1573 emit_jump (done_label);
1574 }
1575 }
1576
1577 do_error_label:
1578 emit_label (do_error);
1579 if (is_ubsan)
1580 {
1581 /* Expand the ubsan builtin call. */
1582 push_temp_slots ();
1583 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1584 arg0, arg1);
1585 expand_normal (fn);
1586 pop_temp_slots ();
1587 do_pending_stack_adjust ();
1588 }
1589 else if (lhs)
1590 write_complex_part (target, const1_rtx, true);
1591
1592 /* We're done. */
1593 emit_label (done_label);
1594
1595 /* u1 * u2 -> sr */
1596 if (uns0_p && uns1_p && !unsr_p)
1597 {
1598 rtx_code_label *all_done_label = gen_label_rtx ();
1599 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1600 NULL, all_done_label, PROB_VERY_LIKELY);
1601 write_complex_part (target, const1_rtx, true);
1602 emit_label (all_done_label);
1603 }
1604
1605 /* s1 * u2 -> sr */
1606 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1607 {
1608 rtx_code_label *all_done_label = gen_label_rtx ();
1609 rtx_code_label *set_noovf = gen_label_rtx ();
1610 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1611 NULL, all_done_label, PROB_VERY_LIKELY);
1612 write_complex_part (target, const1_rtx, true);
1613 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1614 NULL, set_noovf, PROB_VERY_LIKELY);
1615 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1616 NULL, all_done_label, PROB_VERY_UNLIKELY);
1617 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1618 all_done_label, PROB_VERY_UNLIKELY);
1619 emit_label (set_noovf);
1620 write_complex_part (target, const0_rtx, true);
1621 emit_label (all_done_label);
1622 }
1623
1624 if (lhs)
1625 {
1626 if (is_ubsan)
1627 expand_ubsan_result_store (target, res);
1628 else
1629 expand_arith_overflow_result_store (lhs, target, mode, res);
1630 }
1631 }
1632
1633 /* Expand UBSAN_CHECK_ADD call STMT. */
1634
1635 static void
1636 expand_UBSAN_CHECK_ADD (gcall *stmt)
1637 {
1638 location_t loc = gimple_location (stmt);
1639 tree lhs = gimple_call_lhs (stmt);
1640 tree arg0 = gimple_call_arg (stmt, 0);
1641 tree arg1 = gimple_call_arg (stmt, 1);
1642 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
1643 false, false, false, true);
1644 }
1645
1646 /* Expand UBSAN_CHECK_SUB call STMT. */
1647
1648 static void
1649 expand_UBSAN_CHECK_SUB (gcall *stmt)
1650 {
1651 location_t loc = gimple_location (stmt);
1652 tree lhs = gimple_call_lhs (stmt);
1653 tree arg0 = gimple_call_arg (stmt, 0);
1654 tree arg1 = gimple_call_arg (stmt, 1);
1655 if (integer_zerop (arg0))
1656 expand_neg_overflow (loc, lhs, arg1, true);
1657 else
1658 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
1659 false, false, false, true);
1660 }
1661
1662 /* Expand UBSAN_CHECK_MUL call STMT. */
1663
1664 static void
1665 expand_UBSAN_CHECK_MUL (gcall *stmt)
1666 {
1667 location_t loc = gimple_location (stmt);
1668 tree lhs = gimple_call_lhs (stmt);
1669 tree arg0 = gimple_call_arg (stmt, 0);
1670 tree arg1 = gimple_call_arg (stmt, 1);
1671 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
1672 }
1673
1674 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
1675
1676 static void
1677 expand_arith_overflow (enum tree_code code, gimple *stmt)
1678 {
1679 tree lhs = gimple_call_lhs (stmt);
1680 if (lhs == NULL_TREE)
1681 return;
1682 tree arg0 = gimple_call_arg (stmt, 0);
1683 tree arg1 = gimple_call_arg (stmt, 1);
1684 tree type = TREE_TYPE (TREE_TYPE (lhs));
1685 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
1686 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
1687 int unsr_p = TYPE_UNSIGNED (type);
1688 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
1689 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
1690 int precres = TYPE_PRECISION (type);
1691 location_t loc = gimple_location (stmt);
1692 if (!uns0_p && get_range_pos_neg (arg0) == 1)
1693 uns0_p = true;
1694 if (!uns1_p && get_range_pos_neg (arg1) == 1)
1695 uns1_p = true;
1696 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
1697 prec0 = MIN (prec0, pr);
1698 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
1699 prec1 = MIN (prec1, pr);
1700
1701   /* If uns0_p && uns1_p, precop is the minimum precision of an
1702      unsigned type needed to hold the exact result; otherwise
1703      precop is the minimum precision of a signed type needed to
1704      hold the exact result.  */
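  /* For example (an illustrative sketch): adding two 8-bit operands of
     equal signedness needs at most 9 bits; adding an 8-bit unsigned
     operand to an 8-bit signed one needs 10 bits of a signed type;
     multiplying two 8-bit operands needs at most 16 bits, or 17 when
     exactly one operand is signed.  */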
1705 int precop;
1706 if (code == MULT_EXPR)
1707 precop = prec0 + prec1 + (uns0_p != uns1_p);
1708 else
1709 {
1710 if (uns0_p == uns1_p)
1711 precop = MAX (prec0, prec1) + 1;
1712 else if (uns0_p)
1713 precop = MAX (prec0 + 1, prec1) + 1;
1714 else
1715 precop = MAX (prec0, prec1 + 1) + 1;
1716 }
1717 int orig_precres = precres;
1718
1719 do
1720 {
1721 if ((uns0_p && uns1_p)
1722 ? ((precop + !unsr_p) <= precres
1723 /* u1 - u2 -> ur can overflow, no matter what precision
1724 the result has. */
1725 && (code != MINUS_EXPR || !unsr_p))
1726 : (!unsr_p && precop <= precres))
1727 {
1728 	  /* The infinite precision result will always fit into the result.  */
1729 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1730 write_complex_part (target, const0_rtx, true);
1731 enum machine_mode mode = TYPE_MODE (type);
1732 struct separate_ops ops;
1733 ops.code = code;
1734 ops.type = type;
1735 ops.op0 = fold_convert_loc (loc, type, arg0);
1736 ops.op1 = fold_convert_loc (loc, type, arg1);
1737 ops.op2 = NULL_TREE;
1738 ops.location = loc;
1739 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1740 expand_arith_overflow_result_store (lhs, target, mode, tem);
1741 return;
1742 }
1743
1744       /* For sub-word operations, if the target doesn't have them, start
1745 	 with precres widening right away, otherwise do it only
1746 	 if the simplest cases can't be used.  */
1747 if (WORD_REGISTER_OPERATIONS
1748 && orig_precres == precres
1749 && precres < BITS_PER_WORD)
1750 ;
1751 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
1752 && prec1 <= precres)
1753 || ((!uns0_p || !uns1_p) && !unsr_p
1754 && prec0 + uns0_p <= precres
1755 && prec1 + uns1_p <= precres))
1756 {
1757 arg0 = fold_convert_loc (loc, type, arg0);
1758 arg1 = fold_convert_loc (loc, type, arg1);
1759 switch (code)
1760 {
1761 case MINUS_EXPR:
1762 if (integer_zerop (arg0) && !unsr_p)
1763 expand_neg_overflow (loc, lhs, arg1, false);
1764 /* FALLTHRU */
1765 case PLUS_EXPR:
1766 expand_addsub_overflow (loc, code, lhs, arg0, arg1,
1767 unsr_p, unsr_p, unsr_p, false);
1768 return;
1769 case MULT_EXPR:
1770 expand_mul_overflow (loc, lhs, arg0, arg1,
1771 unsr_p, unsr_p, unsr_p, false);
1772 return;
1773 default:
1774 gcc_unreachable ();
1775 }
1776 }

      /* For sub-word operations, retry with a wider type first. */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#if WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

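      /* If both operands fit into a precres-wide type, extend each one
	 according to its own signedness and expand with the
	 mixed-signedness overflow helpers. */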
      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}

      /* Retry with a wider type. */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}

/* Expand ADD_OVERFLOW STMT. */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT. */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT. */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* IFN_LOOP_VECTORIZED calls should be folded away in tree-vectorizer.c,
   so this dummy expander should never be called. */

static void
expand_LOOP_VECTORIZED (gcall *)
{
  gcc_unreachable ();
}

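/* Expand MASK_LOAD call STMT.  MASK_LOAD calls are generated by
   if-conversion and the vectorizer for conditional loads such as
   "if (cond[i]) ... = a[i];", so that the load is only performed in
   the lanes whose mask bit is set. */
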
static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}

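/* Expand MASK_STORE call STMT, the counterpart of MASK_LOAD: the value
   in argument 3 is stored to memory only in the lanes whose mask bit
   is set. */
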
static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}

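/* ABNORMAL_DISPATCHER only models the dispatch point of abnormal
   control flow edges (e.g. from setjmp receivers and nonlocal gotos)
   in the CFG; it expands to no code. */
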
static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}

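/* Expand BUILTIN_EXPECT call STMT.  By this point the branch
   probability hint has already been consumed by the predictors, so
   the call simply evaluates to its first argument. */
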
static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When branch guessing was done, the hints should already have been
     stripped away. */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target,
			 VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded during pass_stdarg, so this
   dummy expander should never be called. */

static void
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Expand the IFN_UNIQUE function according to its first argument. */
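/* IFN_UNIQUE marks a point in the instruction stream that the
   optimizers must keep unique, i.e. neither duplicate nor delete.
   The OpenACC fork/join variants additionally carry the data
   dependence and axis operands consumed by the target patterns. */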

static void
expand_UNIQUE (gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
	pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
	{
	  tree lhs = gimple_call_lhs (stmt);
	  rtx target = const0_rtx;

	  if (lhs)
	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));

	  if (kind == IFN_UNIQUE_OACC_FORK)
	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
	  else
	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
	}
      else
	gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}

/* The size of an OpenACC compute dimension. */

static void
expand_GOACC_DIM_SIZE (gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    /* Host fallback: every dimension has size 1. */
    emit_move_insn (target, const1_rtx);
}

/* The position of an OpenACC execution engine along one compute axis. */

static void
expand_GOACC_DIM_POS (gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    /* Host fallback: the only engine on each axis is at position 0. */
    emit_move_insn (target, const0_rtx);
}

/* GOACC_LOOP calls are lowered by the oacc_device_lower pass, so this
   dummy expander should never be called. */

static void
expand_GOACC_LOOP (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN. */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}