1 /* Utility routines for data type conversion for GCC.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 /* These routines are somewhat language-independent utility functions
22 intended to be called by the language-specific convert () functions. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "stor-layout.h"
30 #include "flags.h"
31 #include "convert.h"
32 #include "diagnostic-core.h"
33 #include "target.h"
34 #include "langhooks.h"
35 #include "builtins.h"
36 #include "ubsan.h"
37
38 /* Convert EXPR to some pointer or reference type TYPE.
39 EXPR must be pointer, reference, integer, enumeral, or literal zero;
40 in other cases error is called. */
41
42 tree
43 convert_to_pointer (tree type, tree expr)
44 {
45 location_t loc = EXPR_LOCATION (expr);
46 if (TREE_TYPE (expr) == type)
47 return expr;
48
49 switch (TREE_CODE (TREE_TYPE (expr)))
50 {
51 case POINTER_TYPE:
52 case REFERENCE_TYPE:
53 {
54 /* If the pointers point to different address spaces, conversion needs
55 to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR. */
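/* Illustrative note (the details are target-defined): on a target with
   named address spaces, converting a pointer in the generic space into a
   pointer into a named space may require real conversion code, which the
   ADDR_SPACE_CONVERT_EXPR lets the back end emit; a plain NOP_EXPR would
   lose that information.  */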
56 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
57 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
58
59 if (to_as == from_as)
60 return fold_build1_loc (loc, NOP_EXPR, type, expr);
61 else
62 return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
63 }
64
65 case INTEGER_TYPE:
66 case ENUMERAL_TYPE:
67 case BOOLEAN_TYPE:
68 {
69 /* If the input precision differs from the target pointer type
70 precision, first convert the input expression to an integer type of
71 the target precision. Some targets, e.g. VMS, need several pointer
72 sizes to coexist so the latter isn't necessarily POINTER_SIZE. */
73 unsigned int pprec = TYPE_PRECISION (type);
74 unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
75
76 if (eprec != pprec)
77 expr = fold_build1_loc (loc, NOP_EXPR,
78 lang_hooks.types.type_for_size (pprec, 0),
79 expr);
80 }
81
82 return fold_build1_loc (loc, CONVERT_EXPR, type, expr);
83
84 default:
85 error ("cannot convert to a pointer type");
86 return convert_to_pointer (type, integer_zero_node);
87 }
88 }
89
90
91 /* Convert EXPR to some floating-point type TYPE.
92
93 EXPR must be float, fixed-point, integer, or enumeral;
94 in other cases error is called. */
95
96 tree
97 convert_to_real (tree type, tree expr)
98 {
99 enum built_in_function fcode = builtin_mathfn_code (expr);
100 tree itype = TREE_TYPE (expr);
101
102 /* Disable until we figure out how to decide whether the functions are
103 present in the runtime. */
104 /* Convert (float)sqrt((double)x), where x is float, into sqrtf(x). */
105 if (optimize
106 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
107 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
108 {
109 switch (fcode)
110 {
111 #define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
112 CASE_MATHFN (COSH)
113 CASE_MATHFN (EXP)
114 CASE_MATHFN (EXP10)
115 CASE_MATHFN (EXP2)
116 CASE_MATHFN (EXPM1)
117 CASE_MATHFN (GAMMA)
118 CASE_MATHFN (J0)
119 CASE_MATHFN (J1)
120 CASE_MATHFN (LGAMMA)
121 CASE_MATHFN (POW10)
122 CASE_MATHFN (SINH)
123 CASE_MATHFN (TGAMMA)
124 CASE_MATHFN (Y0)
125 CASE_MATHFN (Y1)
126 /* The above functions may set errno differently with float
127 input or output so this transformation is not safe with
128 -fmath-errno. */
129 if (flag_errno_math)
130 break;
131 CASE_MATHFN (ACOS)
132 CASE_MATHFN (ACOSH)
133 CASE_MATHFN (ASIN)
134 CASE_MATHFN (ASINH)
135 CASE_MATHFN (ATAN)
136 CASE_MATHFN (ATANH)
137 CASE_MATHFN (CBRT)
138 CASE_MATHFN (COS)
139 CASE_MATHFN (ERF)
140 CASE_MATHFN (ERFC)
141 CASE_MATHFN (LOG)
142 CASE_MATHFN (LOG10)
143 CASE_MATHFN (LOG2)
144 CASE_MATHFN (LOG1P)
145 CASE_MATHFN (SIN)
146 CASE_MATHFN (TAN)
147 CASE_MATHFN (TANH)
148 /* For the above functions, this conversion is not safe. */
149 if (!flag_unsafe_math_optimizations)
150 break;
151 CASE_MATHFN (SQRT)
152 CASE_MATHFN (FABS)
153 CASE_MATHFN (LOGB)
154 #undef CASE_MATHFN
155 {
156 tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
157 tree newtype = type;
158
159 /* We have (outertype)sqrt((innertype)x). Choose the wider of the
160 two types as the safe type for the operation. */
161 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
162 newtype = TREE_TYPE (arg0);
163
164 /* We consider converting
165
166 (T1) sqrtT2 ((T2) exprT3)
167 to
168 (T1) sqrtT4 ((T4) exprT3)
169
170 where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
171 and T4 is NEWTYPE. All of these are floating-point types.
172 T4 (NEWTYPE) should be narrower than T2 (ITYPE). This conversion
173 is safe only if P1 >= P2*2+2, where P1 and P2 are the precisions of
174 T2 and T4. See the following URL for a reference:
175 http://stackoverflow.com/questions/9235456/determining-
176 floating-point-square-root
177 */
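/* For instance, with the common IEEE formats (P = 53 for double and
   P = 24 for float), shortening
     (float) sqrt ((double) f)      where f has type float
   to sqrtf (f) is allowed by this rule, because 53 >= 2*24 + 2.  */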
178 if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
179 && !flag_unsafe_math_optimizations)
180 {
181 /* The following conversion is unsafe even if the precision condition
182 below is satisfied:
183
184 (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
185 */
186 if (TYPE_MODE (type) != TYPE_MODE (newtype))
187 break;
188
189 int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
190 int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
191 if (p1 < p2 * 2 + 2)
192 break;
193 }
194
195 /* Be careful about integer to fp conversions.
196 These may still overflow. */
197 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
198 && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
199 && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
200 || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
201 {
202 tree fn = mathfn_built_in (newtype, fcode);
203
204 if (fn)
205 {
206 tree arg = fold (convert_to_real (newtype, arg0));
207 expr = build_call_expr (fn, 1, arg);
208 if (newtype == type)
209 return expr;
210 }
211 }
212 }
213 default:
214 break;
215 }
216 }
217 if (optimize
218 && (((fcode == BUILT_IN_FLOORL
219 || fcode == BUILT_IN_CEILL
220 || fcode == BUILT_IN_ROUNDL
221 || fcode == BUILT_IN_RINTL
222 || fcode == BUILT_IN_TRUNCL
223 || fcode == BUILT_IN_NEARBYINTL)
224 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
225 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
226 || ((fcode == BUILT_IN_FLOOR
227 || fcode == BUILT_IN_CEIL
228 || fcode == BUILT_IN_ROUND
229 || fcode == BUILT_IN_RINT
230 || fcode == BUILT_IN_TRUNC
231 || fcode == BUILT_IN_NEARBYINT)
232 && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
233 {
234 tree fn = mathfn_built_in (type, fcode);
235
236 if (fn)
237 {
238 tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
239
240 /* Make sure (type)arg0 is an extension, otherwise we could end up
241 changing (float)floor(double d) into floorf((float)d), which is
242 incorrect because (float)d uses round-to-nearest and can round
243 up to the next integer. */
244 if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
245 return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
246 }
247 }
248
249 /* Propagate the cast into the operation. */
250 if (itype != type && FLOAT_TYPE_P (type))
251 switch (TREE_CODE (expr))
252 {
253 /* Convert (float)-x into -(float)x. This is safe for
254 round-to-nearest rounding mode when the inner type is float. */
255 case ABS_EXPR:
256 case NEGATE_EXPR:
257 if (!flag_rounding_math
258 && FLOAT_TYPE_P (itype)
259 && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
260 return build1 (TREE_CODE (expr), type,
261 fold (convert_to_real (type,
262 TREE_OPERAND (expr, 0))));
263 break;
264 /* Convert (outertype)((innertype0)a+(innertype1)b)
265 into ((newtype)a+(newtype)b), where newtype
266 has the widest mode among all of these types. */
267 case PLUS_EXPR:
268 case MINUS_EXPR:
269 case MULT_EXPR:
270 case RDIV_EXPR:
271 {
272 tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
273 tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));
274
275 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
276 && FLOAT_TYPE_P (TREE_TYPE (arg1))
277 && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
278 {
279 tree newtype = type;
280
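/* Illustrative summary of the block below: for decimal floating-point
   operands, promote to the widest decimal type that appears among the
   operands and the result (SD -> DD -> TD) and do the arithmetic
   there.  */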
281 if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
282 || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
283 || TYPE_MODE (type) == SDmode)
284 newtype = dfloat32_type_node;
285 if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
286 || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
287 || TYPE_MODE (type) == DDmode)
288 newtype = dfloat64_type_node;
289 if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
290 || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
291 || TYPE_MODE (type) == TDmode)
292 newtype = dfloat128_type_node;
293 if (newtype == dfloat32_type_node
294 || newtype == dfloat64_type_node
295 || newtype == dfloat128_type_node)
296 {
297 expr = build2 (TREE_CODE (expr), newtype,
298 fold (convert_to_real (newtype, arg0)),
299 fold (convert_to_real (newtype, arg1)));
300 if (newtype == type)
301 return expr;
302 break;
303 }
304
305 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
306 newtype = TREE_TYPE (arg0);
307 if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
308 newtype = TREE_TYPE (arg1);
309 /* Sometimes this transformation is safe (cannot
310 change results through affecting double rounding
311 cases) and sometimes it is not. If NEWTYPE is
312 wider than TYPE, e.g. (float)((long double)double
313 + (long double)double) converted to
314 (float)(double + double), the transformation is
315 unsafe regardless of the details of the types
316 involved; double rounding can arise if the result
317 of NEWTYPE arithmetic is a NEWTYPE value half way
318 between two representable TYPE values but the
319 exact value is sufficiently different (in the
320 right direction) for this difference to be
321 visible in ITYPE arithmetic. If NEWTYPE is the
322 same as TYPE, however, the transformation may be
323 safe depending on the types involved: it is safe
324 if the ITYPE has strictly more than twice as many
325 mantissa bits as TYPE, can represent infinities
326 and NaNs if the TYPE can, and has sufficient
327 exponent range for the product or ratio of two
328 values representable in the TYPE to be within the
329 range of normal values of ITYPE. */
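/* As a concrete illustration, with the usual IEEE binary formats
   (24-bit and 53-bit significands): in
     (float) ((double) f1 + (double) f2)    with float f1, f2
   NEWTYPE is float and ITYPE is double; double has more than twice as
   many mantissa bits as float and covers its exponent range, so the
   addition may be done directly in float when no excess precision is
   required.  */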
330 if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
331 && (flag_unsafe_math_optimizations
332 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
333 && real_can_shorten_arithmetic (TYPE_MODE (itype),
334 TYPE_MODE (type))
335 && !excess_precision_type (newtype))))
336 {
337 expr = build2 (TREE_CODE (expr), newtype,
338 fold (convert_to_real (newtype, arg0)),
339 fold (convert_to_real (newtype, arg1)));
340 if (newtype == type)
341 return expr;
342 }
343 }
344 }
345 break;
346 default:
347 break;
348 }
349
350 switch (TREE_CODE (TREE_TYPE (expr)))
351 {
352 case REAL_TYPE:
353 /* Ignore the conversion if we don't need to store intermediate
354 results and neither type is a decimal float. */
355 return build1 ((flag_float_store
356 || DECIMAL_FLOAT_TYPE_P (type)
357 || DECIMAL_FLOAT_TYPE_P (itype))
358 ? CONVERT_EXPR : NOP_EXPR, type, expr);
359
360 case INTEGER_TYPE:
361 case ENUMERAL_TYPE:
362 case BOOLEAN_TYPE:
363 return build1 (FLOAT_EXPR, type, expr);
364
365 case FIXED_POINT_TYPE:
366 return build1 (FIXED_CONVERT_EXPR, type, expr);
367
368 case COMPLEX_TYPE:
369 return convert (type,
370 fold_build1 (REALPART_EXPR,
371 TREE_TYPE (TREE_TYPE (expr)), expr));
372
373 case POINTER_TYPE:
374 case REFERENCE_TYPE:
375 error ("pointer value used where a floating point value was expected");
376 return convert_to_real (type, integer_zero_node);
377
378 default:
379 error ("aggregate value used where a float was expected");
380 return convert_to_real (type, integer_zero_node);
381 }
382 }
383
384 /* Convert EXPR to some integer (or enum) type TYPE.
385
386 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
387 fixed-point or vector; in other cases error is called.
388
389 The result of this is always supposed to be a newly created tree node
390 not in use in any existing structure. */
391
392 tree
393 convert_to_integer (tree type, tree expr)
394 {
395 enum tree_code ex_form = TREE_CODE (expr);
396 tree intype = TREE_TYPE (expr);
397 unsigned int inprec = element_precision (intype);
398 unsigned int outprec = element_precision (type);
399 location_t loc = EXPR_LOCATION (expr);
400
401 /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
402 be. Consider `enum E { a, b = (enum E) 3 };'. */
403 if (!COMPLETE_TYPE_P (type))
404 {
405 error ("conversion to incomplete type");
406 return error_mark_node;
407 }
408
409 /* Convert e.g. (long)round(d) -> lround(d). */
410 /* If we're converting to char, we may encounter differing behavior
411 between converting from double->char vs double->long->char.
412 We're in "undefined" territory but we prefer to be conservative,
413 so only proceed in "unsafe" math mode. */
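/* For example, (long) round (d) becomes lround (d), provided the C99
   runtime functions are available and -fno-math-errno is in effect.
   Narrower results (e.g. int, on targets where int is narrower than
   long) use the internal i-prefixed builtins such as BUILT_IN_IROUND
   and, as explained above, are only handled in unsafe math mode.  */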
414 if (optimize
415 && (flag_unsafe_math_optimizations
416 || (long_integer_type_node
417 && outprec >= TYPE_PRECISION (long_integer_type_node))))
418 {
419 tree s_expr = strip_float_extensions (expr);
420 tree s_intype = TREE_TYPE (s_expr);
421 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
422 tree fn = 0;
423
424 switch (fcode)
425 {
426 CASE_FLT_FN (BUILT_IN_CEIL):
427 /* Only convert in ISO C99 mode. */
428 if (!targetm.libc_has_function (function_c99_misc))
429 break;
430 if (outprec < TYPE_PRECISION (integer_type_node)
431 || (outprec == TYPE_PRECISION (integer_type_node)
432 && !TYPE_UNSIGNED (type)))
433 fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
434 else if (outprec == TYPE_PRECISION (long_integer_type_node)
435 && !TYPE_UNSIGNED (type))
436 fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
437 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
438 && !TYPE_UNSIGNED (type))
439 fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
440 break;
441
442 CASE_FLT_FN (BUILT_IN_FLOOR):
443 /* Only convert in ISO C99 mode. */
444 if (!targetm.libc_has_function (function_c99_misc))
445 break;
446 if (outprec < TYPE_PRECISION (integer_type_node)
447 || (outprec == TYPE_PRECISION (integer_type_node)
448 && !TYPE_UNSIGNED (type)))
449 fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
450 else if (outprec == TYPE_PRECISION (long_integer_type_node)
451 && !TYPE_UNSIGNED (type))
452 fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
453 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
454 && !TYPE_UNSIGNED (type))
455 fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
456 break;
457
458 CASE_FLT_FN (BUILT_IN_ROUND):
459 /* Only convert in ISO C99 mode and with -fno-math-errno. */
460 if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
461 break;
462 if (outprec < TYPE_PRECISION (integer_type_node)
463 || (outprec == TYPE_PRECISION (integer_type_node)
464 && !TYPE_UNSIGNED (type)))
465 fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
466 else if (outprec == TYPE_PRECISION (long_integer_type_node)
467 && !TYPE_UNSIGNED (type))
468 fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
469 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
470 && !TYPE_UNSIGNED (type))
471 fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
472 break;
473
474 CASE_FLT_FN (BUILT_IN_NEARBYINT):
475 /* Only convert nearbyint* if we can ignore math exceptions. */
476 if (flag_trapping_math)
477 break;
478 /* ... Fall through ... */
479 CASE_FLT_FN (BUILT_IN_RINT):
480 /* Only convert in ISO C99 mode and with -fno-math-errno. */
481 if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
482 break;
483 if (outprec < TYPE_PRECISION (integer_type_node)
484 || (outprec == TYPE_PRECISION (integer_type_node)
485 && !TYPE_UNSIGNED (type)))
486 fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
487 else if (outprec == TYPE_PRECISION (long_integer_type_node)
488 && !TYPE_UNSIGNED (type))
489 fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
490 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
491 && !TYPE_UNSIGNED (type))
492 fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
493 break;
494
495 CASE_FLT_FN (BUILT_IN_TRUNC):
496 return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));
497
498 default:
499 break;
500 }
501
502 if (fn)
503 {
504 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
505 return convert_to_integer (type, newexpr);
506 }
507 }
508
509 /* Convert (int)logb(d) -> ilogb(d). */
510 if (optimize
511 && flag_unsafe_math_optimizations
512 && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
513 && integer_type_node
514 && (outprec > TYPE_PRECISION (integer_type_node)
515 || (outprec == TYPE_PRECISION (integer_type_node)
516 && !TYPE_UNSIGNED (type))))
517 {
518 tree s_expr = strip_float_extensions (expr);
519 tree s_intype = TREE_TYPE (s_expr);
520 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
521 tree fn = 0;
522
523 switch (fcode)
524 {
525 CASE_FLT_FN (BUILT_IN_LOGB):
526 fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
527 break;
528
529 default:
530 break;
531 }
532
533 if (fn)
534 {
535 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
536 return convert_to_integer (type, newexpr);
537 }
538 }
539
540 switch (TREE_CODE (intype))
541 {
542 case POINTER_TYPE:
543 case REFERENCE_TYPE:
544 if (integer_zerop (expr))
545 return build_int_cst (type, 0);
546
547 /* Convert to an unsigned integer of the correct width first, and from
548 there widen/truncate to the required type. Some targets support the
549 coexistence of multiple valid pointer sizes, so fetch the one we need
550 from the type. */
551 expr = fold_build1 (CONVERT_EXPR,
552 lang_hooks.types.type_for_size
553 (TYPE_PRECISION (intype), 0),
554 expr);
555 return fold_convert (type, expr);
556
557 case INTEGER_TYPE:
558 case ENUMERAL_TYPE:
559 case BOOLEAN_TYPE:
560 case OFFSET_TYPE:
561 /* If this is a logical operation, which just returns 0 or 1, we can
562 change the type of the expression. */
563
564 if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
565 {
566 expr = copy_node (expr);
567 TREE_TYPE (expr) = type;
568 return expr;
569 }
570
571 /* If we are widening the type, put in an explicit conversion.
572 Similarly if we are not changing the width. After this, we know
573 we are truncating EXPR. */
574
575 else if (outprec >= inprec)
576 {
577 enum tree_code code;
578
579 /* If the precision of the EXPR's type is K bits and the
580 destination mode has more bits, and the sign is changing,
581 it is not safe to use a NOP_EXPR. For example, suppose
582 that EXPR's type is a 3-bit unsigned integer type, the
583 TYPE is a 3-bit signed integer type, and the machine mode
584 for the types is 8-bit QImode. In that case, the
585 conversion necessitates an explicit sign-extension. In
586 the signed-to-unsigned case the high-order bits have to
587 be cleared. */
588 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
589 && (TYPE_PRECISION (TREE_TYPE (expr))
590 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
591 code = CONVERT_EXPR;
592 else
593 code = NOP_EXPR;
594
595 return fold_build1 (code, type, expr);
596 }
597
598 /* If TYPE is an enumeral type or a type with a precision less
599 than the number of bits in its mode, do the conversion to the
600 type corresponding to its mode, then do a nop conversion
601 to TYPE. */
602 else if (TREE_CODE (type) == ENUMERAL_TYPE
603 || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
604 return build1 (NOP_EXPR, type,
605 convert (lang_hooks.types.type_for_mode
606 (TYPE_MODE (type), TYPE_UNSIGNED (type)),
607 expr));
608
609 /* Here detect when we can distribute the truncation down past some
610 arithmetic. For example, if adding two longs and converting to an
611 int, we can equally well convert both to ints and then add.
612 For the operations handled here, such truncation distribution
613 is always safe.
614 It is desirable in these cases:
615 1) when truncating down to full-word from a larger size
616 2) when truncating takes no work.
617 3) when at least one operand of the arithmetic has been extended
618 (as by C's default conversions). In this case we need two conversions
619 if we do the arithmetic as already requested, so we might as well
620 truncate both and then combine. Perhaps that way we need only one.
621
622 Note that in general we cannot do the arithmetic in a type
623 shorter than the desired result of conversion, even if the operands
624 are both extended from a shorter type, because they might overflow
625 if combined in that type. The exceptions to this--the times when
626 two narrow values can be combined in their narrow type even to
627 make a wider result--are handled by "shorten" in build_binary_op. */
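/* Purely illustrative example of case 3: in (short) (a + b) with
   short a, b, the operands have been promoted to int by the usual
   arithmetic conversions; narrowing them back and adding in a
   short-sized (unsigned, see below) type gives the same truncated
   result while avoiding the widening conversions.  */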
628
629 switch (ex_form)
630 {
631 case RSHIFT_EXPR:
632 /* We can pass truncation down through right shifting
633 when the shift count is a nonpositive constant. */
634 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
635 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
636 goto trunc1;
637 break;
638
639 case LSHIFT_EXPR:
640 /* We can pass truncation down through left shifting
641 when the shift count is a nonnegative constant and
642 the target type is unsigned. */
643 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
644 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
645 && TYPE_UNSIGNED (type)
646 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
647 {
648 /* If shift count is less than the width of the truncated type,
649 really shift. */
650 if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
651 /* In this case, shifting is like multiplication. */
652 goto trunc1;
653 else
654 {
655 /* If it is >= that width, result is zero.
656 Handling this with trunc1 would give the wrong result:
657 (int) ((long long) a << 32) is well defined (as 0)
658 but (int) a << 32 is undefined and would get a
659 warning. */
660
661 tree t = build_int_cst (type, 0);
662
663 /* If the original expression had side-effects, we must
664 preserve it. */
665 if (TREE_SIDE_EFFECTS (expr))
666 return build2 (COMPOUND_EXPR, type, expr, t);
667 else
668 return t;
669 }
670 }
671 break;
672
673 case TRUNC_DIV_EXPR:
674 {
675 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
676 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
677
678 /* Don't distribute unless the output precision is at least as big
679 as the actual inputs and it has the same signedness. */
680 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
681 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
682 /* If signedness of arg0 and arg1 don't match,
683 we can't necessarily find a type to compare them in. */
684 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
685 == TYPE_UNSIGNED (TREE_TYPE (arg1)))
686 /* Do not change the sign of the division. */
687 && (TYPE_UNSIGNED (TREE_TYPE (expr))
688 == TYPE_UNSIGNED (TREE_TYPE (arg0)))
689 /* Either require unsigned division or a division by
690 a constant that is not -1. */
691 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
692 || (TREE_CODE (arg1) == INTEGER_CST
693 && !integer_all_onesp (arg1))))
694 goto trunc1;
695 break;
696 }
697
698 case MAX_EXPR:
699 case MIN_EXPR:
700 case MULT_EXPR:
701 {
702 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
703 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
704
705 /* Don't distribute unless the output precision is at least as big
706 as the actual inputs. Otherwise, the comparison of the
707 truncated values will be wrong. */
708 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
709 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
710 /* If signedness of arg0 and arg1 don't match,
711 we can't necessarily find a type to compare them in. */
712 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
713 == TYPE_UNSIGNED (TREE_TYPE (arg1))))
714 goto trunc1;
715 break;
716 }
717
718 case PLUS_EXPR:
719 case MINUS_EXPR:
720 case BIT_AND_EXPR:
721 case BIT_IOR_EXPR:
722 case BIT_XOR_EXPR:
723 trunc1:
724 {
725 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
726 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
727
728 /* Do not try to narrow operands of pointer subtraction;
729 that will interfere with other folding. */
730 if (ex_form == MINUS_EXPR
731 && CONVERT_EXPR_P (arg0)
732 && CONVERT_EXPR_P (arg1)
733 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
734 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
735 break;
736
737 if (outprec >= BITS_PER_WORD
738 || TRULY_NOOP_TRUNCATION (outprec, inprec)
739 || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
740 || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
741 {
742 /* Do the arithmetic in type TYPEX,
743 then convert result to TYPE. */
744 tree typex = type;
745
746 /* Can't do arithmetic in enumeral types
747 so use an integer type that will hold the values. */
748 if (TREE_CODE (typex) == ENUMERAL_TYPE)
749 typex
750 = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
751 TYPE_UNSIGNED (typex));
752
753 /* But now perhaps TYPEX is as wide as INPREC.
754 In that case, do nothing special here.
755 (Otherwise we would recurse infinitely in convert.) */
756 if (TYPE_PRECISION (typex) != inprec)
757 {
758 /* Don't do unsigned arithmetic where signed was wanted,
759 or vice versa.
760 Exception: if both of the original operands were
761 unsigned then we can safely do the work as unsigned.
762 Exception: shift operations take their type solely
763 from the first argument.
764 Exception: the LSHIFT_EXPR case above requires that
765 we perform this operation unsigned lest we produce
766 signed-overflow undefinedness.
767 And we may need to do it as unsigned
768 if we truncate to the original size. */
769 if (TYPE_UNSIGNED (TREE_TYPE (expr))
770 || (TYPE_UNSIGNED (TREE_TYPE (arg0))
771 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
772 || ex_form == LSHIFT_EXPR
773 || ex_form == RSHIFT_EXPR
774 || ex_form == LROTATE_EXPR
775 || ex_form == RROTATE_EXPR))
776 || ex_form == LSHIFT_EXPR
777 /* If we have !flag_wrapv, and either ARG0 or
778 ARG1 is of a signed type, we have to do
779 PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
780 type in case the operation in outprec precision
781 could overflow. Otherwise, we would introduce
782 signed-overflow undefinedness. */
783 || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
784 || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
785 && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
786 > outprec)
787 || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
788 > outprec))
789 && (ex_form == PLUS_EXPR
790 || ex_form == MINUS_EXPR
791 || ex_form == MULT_EXPR)))
792 {
793 if (!TYPE_UNSIGNED (typex))
794 typex = unsigned_type_for (typex);
795 }
796 else
797 {
798 if (TYPE_UNSIGNED (typex))
799 typex = signed_type_for (typex);
800 }
801 return convert (type,
802 fold_build2 (ex_form, typex,
803 convert (typex, arg0),
804 convert (typex, arg1)));
805 }
806 }
807 }
808 break;
809
810 case NEGATE_EXPR:
811 case BIT_NOT_EXPR:
812 /* This is not correct for ABS_EXPR,
813 since we must test the sign before truncation. */
814 {
815 /* Do the arithmetic in type TYPEX,
816 then convert result to TYPE. */
817 tree typex = type;
818
819 /* Can't do arithmetic in enumeral types
820 so use an integer type that will hold the values. */
821 if (TREE_CODE (typex) == ENUMERAL_TYPE)
822 typex
823 = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
824 TYPE_UNSIGNED (typex));
825
826 if (!TYPE_UNSIGNED (typex))
827 typex = unsigned_type_for (typex);
828 return convert (type,
829 fold_build1 (ex_form, typex,
830 convert (typex,
831 TREE_OPERAND (expr, 0))));
832 }
833
834 CASE_CONVERT:
835 /* Don't introduce a
836 "can't convert between vector values of different size" error. */
837 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
838 && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
839 != GET_MODE_SIZE (TYPE_MODE (type))))
840 break;
841 /* If truncating after truncating, might as well do all at once.
842 If truncating after extending, we may get rid of wasted work. */
843 return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
844
845 case COND_EXPR:
846 /* It is sometimes worthwhile to push the narrowing down through
847 the conditional, and doing so never loses. A COND_EXPR may have a throw
848 as one operand, which then has void type. Just leave void
849 operands as they are. */
850 return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
851 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
852 ? TREE_OPERAND (expr, 1)
853 : convert (type, TREE_OPERAND (expr, 1)),
854 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
855 ? TREE_OPERAND (expr, 2)
856 : convert (type, TREE_OPERAND (expr, 2)));
857
858 default:
859 break;
860 }
861
862 /* When parsing long initializers, we might end up with a lot of casts.
863 Shortcut this. */
864 if (TREE_CODE (expr) == INTEGER_CST)
865 return fold_convert (type, expr);
866 return build1 (CONVERT_EXPR, type, expr);
867
868 case REAL_TYPE:
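/* With the float-cast sanitizer enabled, emit a runtime check (built
   by ubsan_instrument_float_cast) that the truncated value fits in
   TYPE, and chain it with the FIX_TRUNC_EXPR via a COMPOUND_EXPR.  */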
869 if (flag_sanitize & SANITIZE_FLOAT_CAST
870 && current_function_decl != NULL_TREE
871 && !lookup_attribute ("no_sanitize_undefined",
872 DECL_ATTRIBUTES (current_function_decl)))
873 {
874 expr = save_expr (expr);
875 tree check = ubsan_instrument_float_cast (loc, type, expr);
876 expr = build1 (FIX_TRUNC_EXPR, type, expr);
877 if (check == NULL)
878 return expr;
879 return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
880 }
881 else
882 return build1 (FIX_TRUNC_EXPR, type, expr);
883
884 case FIXED_POINT_TYPE:
885 return build1 (FIXED_CONVERT_EXPR, type, expr);
886
887 case COMPLEX_TYPE:
888 return convert (type,
889 fold_build1 (REALPART_EXPR,
890 TREE_TYPE (TREE_TYPE (expr)), expr));
891
892 case VECTOR_TYPE:
893 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
894 {
895 error ("can%'t convert between vector values of different size");
896 return error_mark_node;
897 }
898 return build1 (VIEW_CONVERT_EXPR, type, expr);
899
900 default:
901 error ("aggregate value used where an integer was expected");
902 return convert (type, integer_zero_node);
903 }
904 }
905
906 /* Convert EXPR to the complex type TYPE in the usual ways. */
907
908 tree
909 convert_to_complex (tree type, tree expr)
910 {
911 tree subtype = TREE_TYPE (type);
912
913 switch (TREE_CODE (TREE_TYPE (expr)))
914 {
915 case REAL_TYPE:
916 case FIXED_POINT_TYPE:
917 case INTEGER_TYPE:
918 case ENUMERAL_TYPE:
919 case BOOLEAN_TYPE:
920 return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
921 convert (subtype, integer_zero_node));
922
923 case COMPLEX_TYPE:
924 {
925 tree elt_type = TREE_TYPE (TREE_TYPE (expr));
926
927 if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
928 return expr;
929 else if (TREE_CODE (expr) == COMPLEX_EXPR)
930 return fold_build2 (COMPLEX_EXPR, type,
931 convert (subtype, TREE_OPERAND (expr, 0)),
932 convert (subtype, TREE_OPERAND (expr, 1)));
933 else
934 {
935 expr = save_expr (expr);
936 return
937 fold_build2 (COMPLEX_EXPR, type,
938 convert (subtype,
939 fold_build1 (REALPART_EXPR,
940 TREE_TYPE (TREE_TYPE (expr)),
941 expr)),
942 convert (subtype,
943 fold_build1 (IMAGPART_EXPR,
944 TREE_TYPE (TREE_TYPE (expr)),
945 expr)));
946 }
947 }
948
949 case POINTER_TYPE:
950 case REFERENCE_TYPE:
951 error ("pointer value used where a complex was expected");
952 return convert_to_complex (type, integer_zero_node);
953
954 default:
955 error ("aggregate value used where a complex was expected");
956 return convert_to_complex (type, integer_zero_node);
957 }
958 }
959
960 /* Convert EXPR to the vector type TYPE in the usual ways. */
961
962 tree
963 convert_to_vector (tree type, tree expr)
964 {
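/* Only bit-for-bit reinterpretations are handled here: the operand must
   be an integer or vector object of exactly the same size as TYPE, and
   the result is a VIEW_CONVERT_EXPR rather than an element-wise
   conversion.  */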
965 switch (TREE_CODE (TREE_TYPE (expr)))
966 {
967 case INTEGER_TYPE:
968 case VECTOR_TYPE:
969 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
970 {
971 error ("can%'t convert between vector values of different size");
972 return error_mark_node;
973 }
974 return build1 (VIEW_CONVERT_EXPR, type, expr);
975
976 default:
977 error ("can%'t convert value to a vector");
978 return error_mark_node;
979 }
980 }
981
982 /* Convert EXPR to some fixed-point type TYPE.
983
984 EXPR must be fixed-point, float, integer, or enumeral;
985 in other cases error is called. */
986
987 tree
988 convert_to_fixed (tree type, tree expr)
989 {
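/* An integer zero converts directly to the fixed-point zero constant.
   An integer one is handled here only for accumulator modes, where the
   value 1 is exactly representable; purely fractional modes cannot
   represent it.  */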
990 if (integer_zerop (expr))
991 {
992 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
993 return fixed_zero_node;
994 }
995 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
996 {
997 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
998 return fixed_one_node;
999 }
1000
1001 switch (TREE_CODE (TREE_TYPE (expr)))
1002 {
1003 case FIXED_POINT_TYPE:
1004 case INTEGER_TYPE:
1005 case ENUMERAL_TYPE:
1006 case BOOLEAN_TYPE:
1007 case REAL_TYPE:
1008 return build1 (FIXED_CONVERT_EXPR, type, expr);
1009
1010 case COMPLEX_TYPE:
1011 return convert (type,
1012 fold_build1 (REALPART_EXPR,
1013 TREE_TYPE (TREE_TYPE (expr)), expr));
1014
1015 default:
1016 error ("aggregate value used where a fixed-point was expected");
1017 return error_mark_node;
1018 }
1019 }