/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "diagnostic-core.h"
#include "langhooks.h"

/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
tree
convert_to_pointer (tree type, tree expr)
{
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_TYPE (expr) == type)
    return expr;

  /* Propagate overflow to the NULL pointer.  */
  if (integer_zerop (expr))
    return force_fit_type_double (type, double_int_zero, 0,
                                  TREE_OVERFLOW (expr));

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        /* If the pointers point to different address spaces, conversion needs
           to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR.  */
        addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
        addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));

        if (to_as == from_as)
          return fold_build1_loc (loc, NOP_EXPR, type, expr);
        else
          return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
      }

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      {
        /* If the input precision differs from the target pointer type
           precision, first convert the input expression to an integer type of
           the target precision.  Some targets, e.g. VMS, need several pointer
           sizes to coexist so the latter isn't necessarily POINTER_SIZE.  */
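        /* A hypothetical illustration (not tied to any particular target):
           converting a 64-bit integer EXPR to a 32-bit pointer TYPE first
           narrows EXPR to a 32-bit integer here, and the CONVERT_EXPR below
           then turns that integer into the pointer value.  */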
        unsigned int pprec = TYPE_PRECISION (type);
        unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));

        if (eprec != pprec)
          expr = fold_build1_loc (loc, NOP_EXPR,
                                  lang_hooks.types.type_for_size (pprec, 0),
                                  expr);
      }

      return fold_build1_loc (loc, CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
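
/* A minimal usage sketch (hypothetical caller, not part of this file):
   a language-specific convert () typically dispatches here once it has
   decided that the target type is a pointer or reference, e.g.

     if (POINTER_TYPE_P (type))
       return convert_to_pointer (type, expr);

   and relies on the precision adjustment above when EXPR is narrower or
   wider than the pointer type.  */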


/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in the runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x).  */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
        CASE_MATHFN (COSH)
        CASE_MATHFN (EXP)
        CASE_MATHFN (EXP10)
        CASE_MATHFN (EXP2)
        CASE_MATHFN (EXPM1)
        CASE_MATHFN (GAMMA)
        CASE_MATHFN (J0)
        CASE_MATHFN (J1)
        CASE_MATHFN (LGAMMA)
        CASE_MATHFN (POW10)
        CASE_MATHFN (SINH)
        CASE_MATHFN (TGAMMA)
        CASE_MATHFN (Y0)
        CASE_MATHFN (Y1)
          /* The above functions may set errno differently with float
             input or output so this transformation is not safe with
             -fmath-errno.  */
          if (flag_errno_math)
            break;
        CASE_MATHFN (ACOS)
        CASE_MATHFN (ACOSH)
        CASE_MATHFN (ASIN)
        CASE_MATHFN (ASINH)
        CASE_MATHFN (ATAN)
        CASE_MATHFN (ATANH)
        CASE_MATHFN (CBRT)
        CASE_MATHFN (COS)
        CASE_MATHFN (ERF)
        CASE_MATHFN (ERFC)
        CASE_MATHFN (FABS)
        CASE_MATHFN (LOG)
        CASE_MATHFN (LOG10)
        CASE_MATHFN (LOG2)
        CASE_MATHFN (LOG1P)
        CASE_MATHFN (LOGB)
        CASE_MATHFN (SIN)
        CASE_MATHFN (SQRT)
        CASE_MATHFN (TAN)
        CASE_MATHFN (TANH)
#undef CASE_MATHFN
          {
            tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
            tree newtype = type;

            /* We have (outertype)sqrt((innertype)x).  Choose the wider mode
               of the two as the safe type for the operation.  */
            if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
              newtype = TREE_TYPE (arg0);

            /* Be careful about integer to fp conversions.
               These may still overflow.  */
            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                    || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
              {
                tree fn = mathfn_built_in (newtype, fcode);

                if (fn)
                  {
                    tree arg = fold (convert_to_real (newtype, arg0));
                    expr = build_call_expr (fn, 1, arg);
                    if (newtype == type)
                      return expr;
                  }
              }
          }
        default:
          break;
        }
    }
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
      /* Convert (float)-x into -(float)x.  This is safe for
         round-to-nearest rounding mode.  */
      case ABS_EXPR:
      case NEGATE_EXPR:
        if (!flag_rounding_math
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
          return build1 (TREE_CODE (expr), type,
                         fold (convert_to_real (type,
                                                TREE_OPERAND (expr, 0))));
        break;
      /* Convert (outertype)((innertype0)a+(innertype1)b)
         into ((newtype)a+(newtype)b) where newtype
         is the widest mode from all of these.  */
      case PLUS_EXPR:
      case MINUS_EXPR:
      case MULT_EXPR:
      case RDIV_EXPR:
        {
          tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
          tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

          if (FLOAT_TYPE_P (TREE_TYPE (arg0))
              && FLOAT_TYPE_P (TREE_TYPE (arg1))
              && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
            {
              tree newtype = type;

              if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
                  || TYPE_MODE (type) == SDmode)
                newtype = dfloat32_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
                  || TYPE_MODE (type) == DDmode)
                newtype = dfloat64_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
                  || TYPE_MODE (type) == TDmode)
                newtype = dfloat128_type_node;
              if (newtype == dfloat32_type_node
                  || newtype == dfloat64_type_node
                  || newtype == dfloat128_type_node)
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                  break;
                }

              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg0);
              if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg1);
              /* Sometimes this transformation is safe (cannot
                 change results through affecting double rounding
                 cases) and sometimes it is not.  If NEWTYPE is
                 wider than TYPE, e.g. (float)((long double)double
                 + (long double)double) converted to
                 (float)(double + double), the transformation is
                 unsafe regardless of the details of the types
                 involved; double rounding can arise if the result
                 of NEWTYPE arithmetic is a NEWTYPE value half way
                 between two representable TYPE values but the
                 exact value is sufficiently different (in the
                 right direction) for this difference to be
                 visible in ITYPE arithmetic.  If NEWTYPE is the
                 same as TYPE, however, the transformation may be
                 safe depending on the types involved: it is safe
                 if the ITYPE has strictly more than twice as many
                 mantissa bits as TYPE, can represent infinities
                 and NaNs if the TYPE can, and has sufficient
                 exponent range for the product or ratio of two
                 values representable in the TYPE to be within the
                 range of normal values of ITYPE.  */
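              /* A concrete illustration, assuming IEEE single and double
                 formats (which this file does not require): in
                 (float)((double)f1 * (double)f2) with float F1 and F2 we
                 get NEWTYPE == TYPE == float and ITYPE == double, and
                 double's 53 mantissa bits are more than twice float's 24,
                 so this is the kind of case the test below can accept
                 even without -funsafe-math-optimizations.  */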
              if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (flag_unsafe_math_optimizations
                      || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
                          && real_can_shorten_arithmetic (TYPE_MODE (itype),
                                                          TYPE_MODE (type))
                          && !excess_precision_type (newtype))))
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                }
            }
        }
        break;
      default:
        break;
      }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
          || (outprec == TYPE_PRECISION (integer_type_node)
              && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_LOGB):
          fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
          break;

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first, and from
         there widen/truncate to the required type.  Some targets support the
         coexistence of multiple valid pointer sizes, so fetch the one we need
         from the type.  */
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size
                            (TYPE_PRECISION (intype), 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;
          tree tem;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          tem = fold_unary (code, type, expr);
          if (tem)
            return tem;

          tem = build1 (code, type, expr);
          TREE_NO_WARNING (tem) = 1;
          return tem;
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.
         It is desirable in these cases:
         1) when truncating down to full-word from a larger size;
         2) when truncating takes no work;
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case TRUNC_DIV_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as
               big as the actual inputs and it has the same signedness.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If the signedness of arg0 and arg1 doesn't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1)))
                /* Do not change the sign of the division.  */
                && (TYPE_UNSIGNED (TREE_TYPE (expr))
                    == TYPE_UNSIGNED (TREE_TYPE (arg0)))
                /* Either require unsigned division or a division by
                   a constant that is not -1.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    || (TREE_CODE (arg1) == INTEGER_CST
                        && !integer_all_onesp (arg1))))
              goto trunc1;
            break;
          }

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as
               big as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If the signedness of arg0 and arg1 doesn't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Do not try to narrow operands of pointer subtraction;
               that will interfere with other folding.  */
            if (ex_form == MINUS_EXPR
                && CONVERT_EXPR_P (arg0)
                && CONVERT_EXPR_P (arg1)
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
              break;

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
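                    /* A hypothetical illustration, assuming 32-bit int and
                       64-bit long: narrowing (int) ((long) x + (long) y),
                       with int X and Y, to int-precision arithmetic must be
                       done in unsigned int, because the original long
                       addition cannot overflow while a signed int addition
                       of the same operands could, and that would introduce
                       undefined overflow the source did not have.  */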
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
                           type in case the operation in outprec precision
                           could overflow.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
                                 > outprec)
                                || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
                                    > outprec))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR
                                || ex_form == MULT_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex = unsigned_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }

        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  A COND_EXPR may have a throw
             as one operand, which then has void type.  Just leave void
             operands as they are.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
                              ? TREE_OPERAND (expr, 1)
                              : convert (type, TREE_OPERAND (expr, 1)),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
                              ? TREE_OPERAND (expr, 2)
                              : convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      /* When parsing long initializers, we might end up with a lot of casts.
         Shortcut this.  */
      if (TREE_CODE (expr) == INTEGER_CST)
        return fold_convert (type, expr);
      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}

/* Convert EXPR to the vector type TYPE in the usual ways.  */

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can%'t convert value to a vector");
      return error_mark_node;
    }
}

/* Convert EXPR to some fixed-point type TYPE.

   EXPR must be fixed-point, float, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_fixed (tree type, tree expr)
{
  if (integer_zerop (expr))
    {
      tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
      return fixed_zero_node;
    }
  else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
    {
      tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
      return fixed_one_node;
    }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    default:
      error ("aggregate value used where a fixed-point was expected");
      return error_mark_node;
    }
}