1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
45
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
 48    copy of these macros in cse.c.  Until then, do not
 49    change these macros without also changing the copy in cse.c.  */
50
51 #define FIXED_BASE_PLUS_P(X) \
52 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
53 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
54 || (X) == virtual_stack_vars_rtx \
55 || (X) == virtual_incoming_args_rtx \
56 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
57 && (XEXP (X, 0) == frame_pointer_rtx \
58 || XEXP (X, 0) == hard_frame_pointer_rtx \
 59        || (XEXP (X, 0) == arg_pointer_rtx            \
 60            && fixed_regs[ARG_POINTER_REGNUM])        \
61 || XEXP (X, 0) == virtual_stack_vars_rtx \
62 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
63 || GET_CODE (X) == ADDRESSOF)
64
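/* As an illustration (not from the original sources): under the macro
   above, both (reg frame_pointer) and (plus (reg frame_pointer)
   (const_int 8)) satisfy FIXED_BASE_PLUS_P, while (plus (reg
   frame_pointer) (reg 100)) does not, since the second operand is not
   a CONST_INT.  */
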
65 /* Similar, but also allows reference to the stack pointer.
66
 67    This used to include FIXED_BASE_PLUS_P; however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
70
71 #define NONZERO_BASE_PLUS_P(X) \
72 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
73 || (X) == virtual_stack_vars_rtx \
74 || (X) == virtual_incoming_args_rtx \
75 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
76 && (XEXP (X, 0) == frame_pointer_rtx \
77 || XEXP (X, 0) == hard_frame_pointer_rtx \
 78        || (XEXP (X, 0) == arg_pointer_rtx            \
 79            && fixed_regs[ARG_POINTER_REGNUM])        \
80 || XEXP (X, 0) == virtual_stack_vars_rtx \
81 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
82 || (X) == stack_pointer_rtx \
83 || (X) == virtual_stack_dynamic_rtx \
84 || (X) == virtual_outgoing_args_rtx \
85 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
86 && (XEXP (X, 0) == stack_pointer_rtx \
87 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
88 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
89 || GET_CODE (X) == ADDRESSOF)
90
91 /* Much code operates on (low, high) pairs; the low value is an
92 unsigned wide int, the high value a signed wide int. We
93 occasionally need to sign extend from low to high as if low were a
94 signed wide int. */
95 #define HWI_SIGN_EXTEND(low) \
96 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
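
/* Illustrative example (assuming a 32-bit HOST_WIDE_INT): the pair
   (low = 0x80000000, high = HWI_SIGN_EXTEND (0x80000000) = -1)
   represents the value -2147483648, whereas the same low word paired
   with high = 0 would represent +2147483648.  */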
97
98 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
99 const void *));
100 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
101 enum machine_mode, rtx, rtx));
102 static void check_fold_consts PARAMS ((PTR));
103 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
104 static void simplify_unary_real PARAMS ((PTR));
105 static void simplify_binary_real PARAMS ((PTR));
106 #endif
107 static void simplify_binary_is2orm1 PARAMS ((PTR));
108
109 \f
110 /* Make a binary operation by properly ordering the operands and
111 seeing if the expression folds. */
112
113 rtx
114 simplify_gen_binary (code, mode, op0, op1)
115 enum rtx_code code;
116 enum machine_mode mode;
117 rtx op0, op1;
118 {
119 rtx tem;
120
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == 'c'
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
125
126 /* If this simplifies, do it. */
127 tem = simplify_binary_operation (code, mode, op0, op1);
128
129 if (tem)
130 return tem;
131
132 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
133 just form the operation. */
134
135 if (GET_CODE (op1) == CONST_INT
136 && GET_MODE (op0) != VOIDmode
137 && (code == PLUS || code == MINUS))
138 {
139 HOST_WIDE_INT value = INTVAL (op1);
140 if (code == MINUS)
141 value = -value;
142 return plus_constant (op0, value);
143 }
144 else
145 return gen_rtx_fmt_ee (code, mode, op0, op1);
146 }
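
/* Usage sketch (illustrative only): simplify_gen_binary (PLUS, SImode,
   x, const0_rtx) folds to x via simplify_binary_operation, while
   simplify_gen_binary (PLUS, SImode, x, GEN_INT (4)) takes the
   plus_constant path above, which can collapse nested constant
   additions.  */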
147 \f
148 /* If X is a MEM referencing the constant pool, return the real value.
149 Otherwise return X. */
150 rtx
151 avoid_constant_pool_reference (x)
152 rtx x;
153 {
154 rtx c, addr;
155 enum machine_mode cmode;
156
157 if (GET_CODE (x) != MEM)
158 return x;
159 addr = XEXP (x, 0);
160
161 if (GET_CODE (addr) != SYMBOL_REF
162 || ! CONSTANT_POOL_ADDRESS_P (addr))
163 return x;
164
165 c = get_pool_constant (addr);
166 cmode = get_pool_mode (addr);
167
168 /* If we're accessing the constant in a different mode than it was
169 originally stored, attempt to fix that up via subreg simplifications.
170 If that fails we have no choice but to return the original memory. */
171 if (cmode != GET_MODE (x))
172 {
173 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
174 return c ? c : x;
175 }
176
177 return c;
178 }
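
/* For example (illustrative): given a MEM whose SYMBOL_REF address
   names a constant pool entry holding (const_int 42) in SImode, this
   returns (const_int 42); an ordinary (mem (reg ...)) comes back
   unchanged.  */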
179 \f
180 /* Make a unary operation by first seeing if it folds and otherwise making
181 the specified operation. */
182
183 rtx
184 simplify_gen_unary (code, mode, op, op_mode)
185 enum rtx_code code;
186 enum machine_mode mode;
187 rtx op;
188 enum machine_mode op_mode;
189 {
190 rtx tem;
191
192 /* If this simplifies, use it. */
193 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
194 return tem;
195
196 return gen_rtx_fmt_e (code, mode, op);
197 }
198
199 /* Likewise for ternary operations. */
200
201 rtx
202 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
203 enum rtx_code code;
204 enum machine_mode mode, op0_mode;
205 rtx op0, op1, op2;
206 {
207 rtx tem;
208
209 /* If this simplifies, use it. */
210 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
211 op0, op1, op2)))
212 return tem;
213
214 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
215 }
216 \f
 217 /* Likewise, for relational operations.
 218    CMP_MODE specifies the mode in which the comparison
 219    is done.  */
220
221 rtx
222 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
223 enum rtx_code code;
224 enum machine_mode mode;
225 enum machine_mode cmp_mode;
226 rtx op0, op1;
227 {
228 rtx tem;
229
230 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
231 return tem;
232
233 /* Put complex operands first and constants second. */
234 if (swap_commutative_operands_p (op0, op1))
235 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
236
237 return gen_rtx_fmt_ee (code, mode, op0, op1);
238 }
239 \f
240 /* Replace all occurrences of OLD in X with NEW and try to simplify the
241 resulting RTX. Return a new RTX which is as simplified as possible. */
242
243 rtx
244 simplify_replace_rtx (x, old, new)
245 rtx x;
246 rtx old;
247 rtx new;
248 {
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251
252 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
253 to build a new expression substituting recursively. If we can't do
254 anything, return our input. */
255
256 if (x == old)
257 return new;
258
259 switch (GET_RTX_CLASS (code))
260 {
261 case '1':
262 {
263 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
264 rtx op = (XEXP (x, 0) == old
265 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
266
267 return simplify_gen_unary (code, mode, op, op_mode);
268 }
269
270 case '2':
271 case 'c':
272 return
273 simplify_gen_binary (code, mode,
274 simplify_replace_rtx (XEXP (x, 0), old, new),
275 simplify_replace_rtx (XEXP (x, 1), old, new));
276 case '<':
277 {
278 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
279 ? GET_MODE (XEXP (x, 0))
280 : GET_MODE (XEXP (x, 1)));
281 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
282 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
283
284 return
285 simplify_gen_relational (code, mode,
286 (op_mode != VOIDmode
287 ? op_mode
288 : GET_MODE (op0) != VOIDmode
289 ? GET_MODE (op0)
290 : GET_MODE (op1)),
291 op0, op1);
292 }
293
294 case '3':
295 case 'b':
296 {
297 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
298 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
299
300 return
301 simplify_gen_ternary (code, mode,
302 (op_mode != VOIDmode
303 ? op_mode
304 : GET_MODE (op0)),
305 op0,
306 simplify_replace_rtx (XEXP (x, 1), old, new),
307 simplify_replace_rtx (XEXP (x, 2), old, new));
308 }
309
310 case 'x':
311 /* The only case we try to handle is a SUBREG. */
312 if (code == SUBREG)
313 {
314 rtx exp;
315 exp = simplify_gen_subreg (GET_MODE (x),
316 simplify_replace_rtx (SUBREG_REG (x),
317 old, new),
318 GET_MODE (SUBREG_REG (x)),
319 SUBREG_BYTE (x));
320 if (exp)
321 x = exp;
322 }
323 return x;
324
325 default:
326 if (GET_CODE (x) == MEM)
327 return
328 replace_equiv_address_nv (x,
329 simplify_replace_rtx (XEXP (x, 0),
330 old, new));
331
332 return x;
333 }
334 return x;
335 }
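
/* Usage sketch (illustrative only): replacing (reg 100) with
   const0_rtx in (plus:SI (reg 100) (reg 101)) rebuilds the PLUS
   through simplify_gen_binary, which simplifies 0 + (reg 101) to just
   (reg 101).  */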
336 \f
337 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
338 /* Subroutine of simplify_unary_operation, called via do_float_handler.
339 Handles simplification of unary ops on floating point values. */
340 struct simplify_unary_real_args
341 {
342 rtx operand;
343 rtx result;
344 enum machine_mode mode;
345 enum rtx_code code;
346 bool want_integer;
347 };
348 #define REAL_VALUE_ABS(d_) \
349 (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
350
351 static void
352 simplify_unary_real (p)
353 PTR p;
354 {
355 REAL_VALUE_TYPE d;
356
357 struct simplify_unary_real_args *args =
358 (struct simplify_unary_real_args *) p;
359
360 REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);
361
362 if (args->want_integer)
363 {
364 HOST_WIDE_INT i;
365
366 switch (args->code)
367 {
368 case FIX: i = REAL_VALUE_FIX (d); break;
369 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
370 default:
371 abort ();
372 }
373 args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
374 }
375 else
376 {
377 switch (args->code)
378 {
379 case SQRT:
380 /* We don't attempt to optimize this. */
381 args->result = 0;
382 return;
383
384 case ABS: d = REAL_VALUE_ABS (d); break;
385 case NEG: d = REAL_VALUE_NEGATE (d); break;
386 case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d); break;
387 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
388 case FIX: d = REAL_VALUE_RNDZINT (d); break;
389 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
390 default:
391 abort ();
392 }
393 args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
394 }
395 }
396 #endif
397
398 /* Try to simplify a unary operation CODE whose output mode is to be
399 MODE with input operand OP whose mode was originally OP_MODE.
400 Return zero if no simplification can be made. */
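/* For instance (illustrative): (not:SI (const_int 5)) folds to
   (const_int -6), and (neg (neg X)) folds back to X via the
   non-constant cases at the end of this function.  */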
401 rtx
402 simplify_unary_operation (code, mode, op, op_mode)
403 enum rtx_code code;
404 enum machine_mode mode;
405 rtx op;
406 enum machine_mode op_mode;
407 {
408 unsigned int width = GET_MODE_BITSIZE (mode);
409 rtx trueop = avoid_constant_pool_reference (op);
410
411 /* The order of these tests is critical so that, for example, we don't
412 check the wrong mode (input vs. output) for a conversion operation,
413 such as FIX. At some point, this should be simplified. */
414
415 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
416
417 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
418 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
419 {
420 HOST_WIDE_INT hv, lv;
421 REAL_VALUE_TYPE d;
422
423 if (GET_CODE (trueop) == CONST_INT)
424 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
425 else
426 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
427
428 #ifdef REAL_ARITHMETIC
429 REAL_VALUE_FROM_INT (d, lv, hv, mode);
430 #else
431 if (hv < 0)
432 {
433 d = (double) (~ hv);
434 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
435 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
436 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
437 d = (- d - 1.0);
438 }
439 else
440 {
441 d = (double) hv;
442 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
443 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
444 d += (double) (unsigned HOST_WIDE_INT) lv;
445 }
446 #endif /* REAL_ARITHMETIC */
447 d = real_value_truncate (mode, d);
448 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
449 }
450 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
451 && (GET_CODE (trueop) == CONST_DOUBLE
452 || GET_CODE (trueop) == CONST_INT))
453 {
454 HOST_WIDE_INT hv, lv;
455 REAL_VALUE_TYPE d;
456
457 if (GET_CODE (trueop) == CONST_INT)
458 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
459 else
460 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
461
462 if (op_mode == VOIDmode)
463 {
464 /* We don't know how to interpret negative-looking numbers in
465 this case, so don't try to fold those. */
466 if (hv < 0)
467 return 0;
468 }
469 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
470 ;
471 else
472 hv = 0, lv &= GET_MODE_MASK (op_mode);
473
474 #ifdef REAL_ARITHMETIC
475 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
476 #else
477
478 d = (double) (unsigned HOST_WIDE_INT) hv;
479 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
480 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
481 d += (double) (unsigned HOST_WIDE_INT) lv;
482 #endif /* REAL_ARITHMETIC */
483 d = real_value_truncate (mode, d);
484 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
485 }
486 #endif
487
488 if (GET_CODE (trueop) == CONST_INT
489 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
490 {
491 HOST_WIDE_INT arg0 = INTVAL (trueop);
492 HOST_WIDE_INT val;
493
494 switch (code)
495 {
496 case NOT:
497 val = ~ arg0;
498 break;
499
500 case NEG:
501 val = - arg0;
502 break;
503
504 case ABS:
505 val = (arg0 >= 0 ? arg0 : - arg0);
506 break;
507
508 case FFS:
 509       /* Don't use ffs here.  Instead, get the low-order bit and then its
 510          number.  If arg0 is zero, this will return 0, as desired.  */
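      /* Worked example (illustrative): arg0 = 12 (binary 1100) gives
         arg0 & -arg0 = 4 and exact_log2 (4) = 2, so val = 3, the
         1-based position of the lowest set bit.  */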
511 arg0 &= GET_MODE_MASK (mode);
512 val = exact_log2 (arg0 & (- arg0)) + 1;
513 break;
514
515 case TRUNCATE:
516 val = arg0;
517 break;
518
519 case ZERO_EXTEND:
520 if (op_mode == VOIDmode)
521 op_mode = mode;
522 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
523 {
524 /* If we were really extending the mode,
525 we would have to distinguish between zero-extension
526 and sign-extension. */
527 if (width != GET_MODE_BITSIZE (op_mode))
528 abort ();
529 val = arg0;
530 }
531 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
532 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
533 else
534 return 0;
535 break;
536
537 case SIGN_EXTEND:
538 if (op_mode == VOIDmode)
539 op_mode = mode;
540 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
541 {
542 /* If we were really extending the mode,
543 we would have to distinguish between zero-extension
544 and sign-extension. */
545 if (width != GET_MODE_BITSIZE (op_mode))
546 abort ();
547 val = arg0;
548 }
549 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
550 {
551 val
552 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
553 if (val
554 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
555 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
556 }
557 else
558 return 0;
559 break;
560
561 case SQRT:
562 case FLOAT_EXTEND:
563 case FLOAT_TRUNCATE:
564 return 0;
565
566 default:
567 abort ();
568 }
569
570 val = trunc_int_for_mode (val, mode);
571
572 return GEN_INT (val);
573 }
574
575 /* We can do some operations on integer CONST_DOUBLEs. Also allow
576 for a DImode operation on a CONST_INT. */
 577   else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
578 && (GET_CODE (trueop) == CONST_DOUBLE
579 || GET_CODE (trueop) == CONST_INT))
580 {
581 unsigned HOST_WIDE_INT l1, lv;
582 HOST_WIDE_INT h1, hv;
583
584 if (GET_CODE (trueop) == CONST_DOUBLE)
585 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
586 else
587 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
588
589 switch (code)
590 {
591 case NOT:
592 lv = ~ l1;
593 hv = ~ h1;
594 break;
595
596 case NEG:
597 neg_double (l1, h1, &lv, &hv);
598 break;
599
600 case ABS:
601 if (h1 < 0)
602 neg_double (l1, h1, &lv, &hv);
603 else
604 lv = l1, hv = h1;
605 break;
606
607 case FFS:
608 hv = 0;
609 if (l1 == 0)
610 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
611 else
612 lv = exact_log2 (l1 & (-l1)) + 1;
613 break;
614
615 case TRUNCATE:
616 /* This is just a change-of-mode, so do nothing. */
617 lv = l1, hv = h1;
618 break;
619
620 case ZERO_EXTEND:
621 if (op_mode == VOIDmode
622 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
623 return 0;
624
625 hv = 0;
626 lv = l1 & GET_MODE_MASK (op_mode);
627 break;
628
629 case SIGN_EXTEND:
630 if (op_mode == VOIDmode
631 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
632 return 0;
633 else
634 {
635 lv = l1 & GET_MODE_MASK (op_mode);
636 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
637 && (lv & ((HOST_WIDE_INT) 1
638 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
639 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
640
641 hv = HWI_SIGN_EXTEND (lv);
642 }
643 break;
644
645 case SQRT:
646 return 0;
647
648 default:
649 return 0;
650 }
651
652 return immed_double_const (lv, hv, mode);
653 }
654
655 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
656 else if (GET_CODE (trueop) == CONST_DOUBLE
657 && GET_MODE_CLASS (mode) == MODE_FLOAT)
658 {
659 struct simplify_unary_real_args args;
660 args.operand = trueop;
661 args.mode = mode;
662 args.code = code;
663 args.want_integer = false;
664
665 if (do_float_handler (simplify_unary_real, (PTR) &args))
666 return args.result;
667
668 return 0;
669 }
670
671 else if (GET_CODE (trueop) == CONST_DOUBLE
672 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
673 && GET_MODE_CLASS (mode) == MODE_INT
674 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
675 {
676 struct simplify_unary_real_args args;
677 args.operand = trueop;
678 args.mode = mode;
679 args.code = code;
680 args.want_integer = true;
681
682 if (do_float_handler (simplify_unary_real, (PTR) &args))
683 return args.result;
684
685 return 0;
686 }
687 #endif
688 /* This was formerly used only for non-IEEE float.
689 eggert@twinsun.com says it is safe for IEEE also. */
690 else
691 {
692 enum rtx_code reversed;
693 /* There are some simplifications we can do even if the operands
694 aren't constant. */
695 switch (code)
696 {
697 case NOT:
698 /* (not (not X)) == X. */
699 if (GET_CODE (op) == NOT)
700 return XEXP (op, 0);
701
702 /* (not (eq X Y)) == (ne X Y), etc. */
703 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
704 && ((reversed = reversed_comparison_code (op, NULL_RTX))
705 != UNKNOWN))
706 return gen_rtx_fmt_ee (reversed,
707 op_mode, XEXP (op, 0), XEXP (op, 1));
708 break;
709
710 case NEG:
711 /* (neg (neg X)) == X. */
712 if (GET_CODE (op) == NEG)
713 return XEXP (op, 0);
714 break;
715
716 case SIGN_EXTEND:
717 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
718 becomes just the MINUS if its mode is MODE. This allows
719 folding switch statements on machines using casesi (such as
720 the VAX). */
721 if (GET_CODE (op) == TRUNCATE
722 && GET_MODE (XEXP (op, 0)) == mode
723 && GET_CODE (XEXP (op, 0)) == MINUS
724 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
725 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
726 return XEXP (op, 0);
727
728 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
729 if (! POINTERS_EXTEND_UNSIGNED
730 && mode == Pmode && GET_MODE (op) == ptr_mode
731 && (CONSTANT_P (op)
732 || (GET_CODE (op) == SUBREG
733 && GET_CODE (SUBREG_REG (op)) == REG
734 && REG_POINTER (SUBREG_REG (op))
735 && GET_MODE (SUBREG_REG (op)) == Pmode)))
736 return convert_memory_address (Pmode, op);
737 #endif
738 break;
739
740 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
741 case ZERO_EXTEND:
742 if (POINTERS_EXTEND_UNSIGNED > 0
743 && mode == Pmode && GET_MODE (op) == ptr_mode
744 && (CONSTANT_P (op)
745 || (GET_CODE (op) == SUBREG
746 && GET_CODE (SUBREG_REG (op)) == REG
747 && REG_POINTER (SUBREG_REG (op))
748 && GET_MODE (SUBREG_REG (op)) == Pmode)))
749 return convert_memory_address (Pmode, op);
750 break;
751 #endif
752
753 default:
754 break;
755 }
756
757 return 0;
758 }
759 }
760 \f
761 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
762 /* Subroutine of simplify_binary_operation, called via do_float_handler.
763 Handles simplification of binary ops on floating point values. */
764 struct simplify_binary_real_args
765 {
766 rtx trueop0, trueop1;
767 rtx result;
768 enum rtx_code code;
769 enum machine_mode mode;
770 };
771
772 static void
773 simplify_binary_real (p)
774 PTR p;
775 {
776 REAL_VALUE_TYPE f0, f1, value;
777 struct simplify_binary_real_args *args =
778 (struct simplify_binary_real_args *) p;
779
780 REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
781 REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
782 f0 = real_value_truncate (args->mode, f0);
783 f1 = real_value_truncate (args->mode, f1);
784
785 #ifdef REAL_ARITHMETIC
786 #ifndef REAL_INFINITY
787 if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
788 {
789 args->result = 0;
790 return;
791 }
792 #endif
793 REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
794 #else
795 switch (args->code)
796 {
797 case PLUS:
798 value = f0 + f1;
799 break;
800 case MINUS:
801 value = f0 - f1;
802 break;
803 case MULT:
804 value = f0 * f1;
805 break;
806 case DIV:
807 #ifndef REAL_INFINITY
 808       if (f1 == 0)
 809         { args->result = 0; return; }
810 #endif
811 value = f0 / f1;
812 break;
813 case SMIN:
814 value = MIN (f0, f1);
815 break;
816 case SMAX:
817 value = MAX (f0, f1);
818 break;
819 default:
820 abort ();
821 }
822 #endif
823
824 value = real_value_truncate (args->mode, value);
825 args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
826 }
827 #endif
828
829 /* Another subroutine called via do_float_handler. This one tests
 830    the given floating point value against 2.0 and -1.0.  */
831 struct simplify_binary_is2orm1_args
832 {
833 rtx value;
834 bool is_2;
835 bool is_m1;
836 };
837
838 static void
839 simplify_binary_is2orm1 (p)
840 PTR p;
841 {
842 REAL_VALUE_TYPE d;
843 struct simplify_binary_is2orm1_args *args =
844 (struct simplify_binary_is2orm1_args *) p;
845
846 REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
847 args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
848 args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
849 }
850
851 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
852 and OP1. Return 0 if no simplification is possible.
853
854 Don't use this for relational operations such as EQ or LT.
855 Use simplify_relational_operation instead. */
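/* Example (illustrative only): simplify_binary_operation (PLUS, SImode,
   GEN_INT (2), GEN_INT (3)) yields (const_int 5), while an unfoldable
   pair such as two distinct hard registers makes it return 0 so the
   caller can emit the operation as written.  */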
856 rtx
857 simplify_binary_operation (code, mode, op0, op1)
858 enum rtx_code code;
859 enum machine_mode mode;
860 rtx op0, op1;
861 {
862 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
863 HOST_WIDE_INT val;
864 unsigned int width = GET_MODE_BITSIZE (mode);
865 rtx tem;
866 rtx trueop0 = avoid_constant_pool_reference (op0);
867 rtx trueop1 = avoid_constant_pool_reference (op1);
868
869 /* Relational operations don't work here. We must know the mode
870 of the operands in order to do the comparison correctly.
871 Assuming a full word can give incorrect results.
872 Consider comparing 128 with -128 in QImode. */
873
874 if (GET_RTX_CLASS (code) == '<')
875 abort ();
876
877 /* Make sure the constant is second. */
878 if (GET_RTX_CLASS (code) == 'c'
879 && swap_commutative_operands_p (trueop0, trueop1))
880 {
881 tem = op0, op0 = op1, op1 = tem;
882 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
883 }
884
885 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
886 if (GET_MODE_CLASS (mode) == MODE_FLOAT
887 && GET_CODE (trueop0) == CONST_DOUBLE
888 && GET_CODE (trueop1) == CONST_DOUBLE
889 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
890 {
891 struct simplify_binary_real_args args;
892 args.trueop0 = trueop0;
893 args.trueop1 = trueop1;
894 args.mode = mode;
895 args.code = code;
896
897 if (do_float_handler (simplify_binary_real, (PTR) &args))
898 return args.result;
899 return 0;
900 }
901 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
902
903 /* We can fold some multi-word operations. */
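  /* E.g. (illustrative), on a host with a 32-bit HOST_WIDE_INT, a DImode
     (plus (const_int -1) (const_int 1)) is folded here by add_double on
     the (low, high) halves, giving (const_int 0).  */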
904 if (GET_MODE_CLASS (mode) == MODE_INT
905 && width == HOST_BITS_PER_WIDE_INT * 2
906 && (GET_CODE (trueop0) == CONST_DOUBLE
907 || GET_CODE (trueop0) == CONST_INT)
908 && (GET_CODE (trueop1) == CONST_DOUBLE
909 || GET_CODE (trueop1) == CONST_INT))
910 {
911 unsigned HOST_WIDE_INT l1, l2, lv;
912 HOST_WIDE_INT h1, h2, hv;
913
914 if (GET_CODE (trueop0) == CONST_DOUBLE)
915 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
916 else
917 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
918
919 if (GET_CODE (trueop1) == CONST_DOUBLE)
920 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
921 else
922 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
923
924 switch (code)
925 {
926 case MINUS:
927 /* A - B == A + (-B). */
928 neg_double (l2, h2, &lv, &hv);
929 l2 = lv, h2 = hv;
930
 931           /* ... fall through ...  */
932
933 case PLUS:
934 add_double (l1, h1, l2, h2, &lv, &hv);
935 break;
936
937 case MULT:
938 mul_double (l1, h1, l2, h2, &lv, &hv);
939 break;
940
941 case DIV: case MOD: case UDIV: case UMOD:
942 /* We'd need to include tree.h to do this and it doesn't seem worth
943 it. */
944 return 0;
945
946 case AND:
947 lv = l1 & l2, hv = h1 & h2;
948 break;
949
950 case IOR:
951 lv = l1 | l2, hv = h1 | h2;
952 break;
953
954 case XOR:
955 lv = l1 ^ l2, hv = h1 ^ h2;
956 break;
957
958 case SMIN:
959 if (h1 < h2
960 || (h1 == h2
961 && ((unsigned HOST_WIDE_INT) l1
962 < (unsigned HOST_WIDE_INT) l2)))
963 lv = l1, hv = h1;
964 else
965 lv = l2, hv = h2;
966 break;
967
968 case SMAX:
969 if (h1 > h2
970 || (h1 == h2
971 && ((unsigned HOST_WIDE_INT) l1
972 > (unsigned HOST_WIDE_INT) l2)))
973 lv = l1, hv = h1;
974 else
975 lv = l2, hv = h2;
976 break;
977
978 case UMIN:
979 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
980 || (h1 == h2
981 && ((unsigned HOST_WIDE_INT) l1
982 < (unsigned HOST_WIDE_INT) l2)))
983 lv = l1, hv = h1;
984 else
985 lv = l2, hv = h2;
986 break;
987
988 case UMAX:
989 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
990 || (h1 == h2
991 && ((unsigned HOST_WIDE_INT) l1
992 > (unsigned HOST_WIDE_INT) l2)))
993 lv = l1, hv = h1;
994 else
995 lv = l2, hv = h2;
996 break;
997
998 case LSHIFTRT: case ASHIFTRT:
999 case ASHIFT:
1000 case ROTATE: case ROTATERT:
1001 #ifdef SHIFT_COUNT_TRUNCATED
1002 if (SHIFT_COUNT_TRUNCATED)
1003 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1004 #endif
1005
1006 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1007 return 0;
1008
1009 if (code == LSHIFTRT || code == ASHIFTRT)
1010 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1011 code == ASHIFTRT);
1012 else if (code == ASHIFT)
1013 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1014 else if (code == ROTATE)
1015 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1016 else /* code == ROTATERT */
1017 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1018 break;
1019
1020 default:
1021 return 0;
1022 }
1023
1024 return immed_double_const (lv, hv, mode);
1025 }
1026
1027 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1028 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1029 {
1030 /* Even if we can't compute a constant result,
1031 there are some cases worth simplifying. */
1032
1033 switch (code)
1034 {
1035 case PLUS:
1036 /* In IEEE floating point, x+0 is not the same as x. Similarly
1037 for the other optimizations below. */
1038 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1039 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1040 break;
1041
1042 if (trueop1 == CONST0_RTX (mode))
1043 return op0;
1044
1045 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
1046 if (GET_CODE (op0) == NEG)
1047 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1048 else if (GET_CODE (op1) == NEG)
1049 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1050
1051 /* (~a) + 1 -> -a */
1052 if (INTEGRAL_MODE_P (mode)
1053 && GET_CODE (op0) == NOT
1054 && trueop1 == const1_rtx)
1055 return gen_rtx_NEG (mode, XEXP (op0, 0));
1056
1057 /* Handle both-operands-constant cases. We can only add
1058 CONST_INTs to constants since the sum of relocatable symbols
1059 can't be handled by most assemblers. Don't add CONST_INT
1060 to CONST_INT since overflow won't be computed properly if wider
1061 than HOST_BITS_PER_WIDE_INT. */
1062
1063 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1064 && GET_CODE (op1) == CONST_INT)
1065 return plus_constant (op0, INTVAL (op1));
1066 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1067 && GET_CODE (op0) == CONST_INT)
1068 return plus_constant (op1, INTVAL (op0));
1069
1070 /* See if this is something like X * C - X or vice versa or
1071 if the multiplication is written as a shift. If so, we can
1072 distribute and make a new multiply, shift, or maybe just
 1073          have X (if C is 2 in the example above).  But don't make a
 1074          real multiply if we didn't have one before.  */
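      /* A concrete instance (illustrative): in (plus (mult X 3) X) the
         coefficients are 3 and 1, so the sum is rewritten as X times 4.
         The had_mult check then rejects any result that would introduce
         a MULT not present in the input.  */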
1075
1076 if (! FLOAT_MODE_P (mode))
1077 {
1078 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1079 rtx lhs = op0, rhs = op1;
1080 int had_mult = 0;
1081
1082 if (GET_CODE (lhs) == NEG)
1083 coeff0 = -1, lhs = XEXP (lhs, 0);
1084 else if (GET_CODE (lhs) == MULT
1085 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1086 {
1087 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1088 had_mult = 1;
1089 }
1090 else if (GET_CODE (lhs) == ASHIFT
1091 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1092 && INTVAL (XEXP (lhs, 1)) >= 0
1093 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1094 {
1095 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1096 lhs = XEXP (lhs, 0);
1097 }
1098
1099 if (GET_CODE (rhs) == NEG)
1100 coeff1 = -1, rhs = XEXP (rhs, 0);
1101 else if (GET_CODE (rhs) == MULT
1102 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1103 {
1104 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1105 had_mult = 1;
1106 }
1107 else if (GET_CODE (rhs) == ASHIFT
1108 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1109 && INTVAL (XEXP (rhs, 1)) >= 0
1110 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1111 {
1112 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1113 rhs = XEXP (rhs, 0);
1114 }
1115
1116 if (rtx_equal_p (lhs, rhs))
1117 {
1118 tem = simplify_gen_binary (MULT, mode, lhs,
1119 GEN_INT (coeff0 + coeff1));
1120 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1121 }
1122 }
1123
1124 /* If one of the operands is a PLUS or a MINUS, see if we can
1125 simplify this by the associative law.
1126 Don't use the associative law for floating point.
1127 The inaccuracy makes it nonassociative,
1128 and subtle programs can break if operations are associated. */
1129
1130 if (INTEGRAL_MODE_P (mode)
1131 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1132 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1133 || (GET_CODE (op0) == CONST
1134 && GET_CODE (XEXP (op0, 0)) == PLUS)
1135 || (GET_CODE (op1) == CONST
1136 && GET_CODE (XEXP (op1, 0)) == PLUS))
1137 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1138 return tem;
1139 break;
1140
1141 case COMPARE:
1142 #ifdef HAVE_cc0
 1143       /* Convert (compare FOO (const_int 0)) to FOO when we are using
 1144          cc0; otherwise, leave it as a COMPARE so we can distinguish it
 1145          from a register-register-copy.
1146
1147 In IEEE floating point, x-0 is not the same as x. */
1148
1149 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1150 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1151 && trueop1 == CONST0_RTX (mode))
1152 return op0;
1153 #endif
1154
1155 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1156 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1157 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1158 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1159 {
1160 rtx xop00 = XEXP (op0, 0);
1161 rtx xop10 = XEXP (op1, 0);
1162
1163 #ifdef HAVE_cc0
1164 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1165 #else
1166 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1167 && GET_MODE (xop00) == GET_MODE (xop10)
1168 && REGNO (xop00) == REGNO (xop10)
1169 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1170 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1171 #endif
1172 return xop00;
1173 }
1174 break;
1175
1176 case MINUS:
1177 /* None of these optimizations can be done for IEEE
1178 floating point. */
1179 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1180 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1181 break;
1182
1183 /* We can't assume x-x is 0 even with non-IEEE floating point,
1184 but since it is zero except in very strange circumstances, we
1185 will treat it as zero with -funsafe-math-optimizations. */
1186 if (rtx_equal_p (trueop0, trueop1)
1187 && ! side_effects_p (op0)
1188 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1189 return CONST0_RTX (mode);
1190
1191 /* Change subtraction from zero into negation. */
1192 if (trueop0 == CONST0_RTX (mode))
1193 return gen_rtx_NEG (mode, op1);
1194
1195 /* (-1 - a) is ~a. */
1196 if (trueop0 == constm1_rtx)
1197 return gen_rtx_NOT (mode, op1);
1198
1199 /* Subtracting 0 has no effect. */
1200 if (trueop1 == CONST0_RTX (mode))
1201 return op0;
1202
1203 /* See if this is something like X * C - X or vice versa or
1204 if the multiplication is written as a shift. If so, we can
1205 distribute and make a new multiply, shift, or maybe just
 1206          have X (if C is 2 in the example above).  But don't make a
 1207          real multiply if we didn't have one before.  */
1208
1209 if (! FLOAT_MODE_P (mode))
1210 {
1211 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1212 rtx lhs = op0, rhs = op1;
1213 int had_mult = 0;
1214
1215 if (GET_CODE (lhs) == NEG)
1216 coeff0 = -1, lhs = XEXP (lhs, 0);
1217 else if (GET_CODE (lhs) == MULT
1218 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1219 {
1220 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1221 had_mult = 1;
1222 }
1223 else if (GET_CODE (lhs) == ASHIFT
1224 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1225 && INTVAL (XEXP (lhs, 1)) >= 0
1226 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1227 {
1228 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1229 lhs = XEXP (lhs, 0);
1230 }
1231
1232 if (GET_CODE (rhs) == NEG)
1233 coeff1 = - 1, rhs = XEXP (rhs, 0);
1234 else if (GET_CODE (rhs) == MULT
1235 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1236 {
1237 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1238 had_mult = 1;
1239 }
1240 else if (GET_CODE (rhs) == ASHIFT
1241 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1242 && INTVAL (XEXP (rhs, 1)) >= 0
1243 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1244 {
1245 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1246 rhs = XEXP (rhs, 0);
1247 }
1248
1249 if (rtx_equal_p (lhs, rhs))
1250 {
1251 tem = simplify_gen_binary (MULT, mode, lhs,
1252 GEN_INT (coeff0 - coeff1));
1253 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1254 }
1255 }
1256
1257 /* (a - (-b)) -> (a + b). */
1258 if (GET_CODE (op1) == NEG)
1259 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1260
1261 /* If one of the operands is a PLUS or a MINUS, see if we can
1262 simplify this by the associative law.
1263 Don't use the associative law for floating point.
1264 The inaccuracy makes it nonassociative,
1265 and subtle programs can break if operations are associated. */
1266
1267 if (INTEGRAL_MODE_P (mode)
1268 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1269 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1270 || (GET_CODE (op0) == CONST
1271 && GET_CODE (XEXP (op0, 0)) == PLUS)
1272 || (GET_CODE (op1) == CONST
1273 && GET_CODE (XEXP (op1, 0)) == PLUS))
1274 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1275 return tem;
1276
1277 /* Don't let a relocatable value get a negative coeff. */
1278 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1279 return plus_constant (op0, - INTVAL (op1));
1280
1281 /* (x - (x & y)) -> (x & ~y) */
1282 if (GET_CODE (op1) == AND)
1283 {
1284 if (rtx_equal_p (op0, XEXP (op1, 0)))
1285 return simplify_gen_binary (AND, mode, op0,
1286 gen_rtx_NOT (mode, XEXP (op1, 1)));
1287 if (rtx_equal_p (op0, XEXP (op1, 1)))
1288 return simplify_gen_binary (AND, mode, op0,
1289 gen_rtx_NOT (mode, XEXP (op1, 0)));
1290 }
1291 break;
1292
1293 case MULT:
1294 if (trueop1 == constm1_rtx)
1295 {
1296 tem = simplify_unary_operation (NEG, mode, op0, mode);
1297
1298 return tem ? tem : gen_rtx_NEG (mode, op0);
1299 }
1300
1301 /* In IEEE floating point, x*0 is not always 0. */
1302 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1303 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1304 && trueop1 == CONST0_RTX (mode)
1305 && ! side_effects_p (op0))
1306 return op1;
1307
 1308       /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
1309 However, ANSI says we can drop signals,
1310 so we can do this anyway. */
1311 if (trueop1 == CONST1_RTX (mode))
1312 return op0;
1313
1314 /* Convert multiply by constant power of two into shift unless
1315 we are still generating RTL. This test is a kludge. */
1316 if (GET_CODE (trueop1) == CONST_INT
1317 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1318 /* If the mode is larger than the host word size, and the
1319 uppermost bit is set, then this isn't a power of two due
1320 to implicit sign extension. */
1321 && (width <= HOST_BITS_PER_WIDE_INT
1322 || val != HOST_BITS_PER_WIDE_INT - 1)
1323 && ! rtx_equal_function_value_matters)
1324 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1325
1326 if (GET_CODE (trueop1) == CONST_DOUBLE
1327 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1328 {
1329 struct simplify_binary_is2orm1_args args;
1330
1331 args.value = trueop1;
1332 if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
1333 return 0;
1334
1335 /* x*2 is x+x and x*(-1) is -x */
1336 if (args.is_2 && GET_MODE (op0) == mode)
1337 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1338
1339 else if (args.is_m1 && GET_MODE (op0) == mode)
1340 return gen_rtx_NEG (mode, op0);
1341 }
1342 break;
1343
1344 case IOR:
1345 if (trueop1 == const0_rtx)
1346 return op0;
1347 if (GET_CODE (trueop1) == CONST_INT
1348 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1349 == GET_MODE_MASK (mode)))
1350 return op1;
1351 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1352 return op0;
1353 /* A | (~A) -> -1 */
1354 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1355 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1356 && ! side_effects_p (op0)
1357 && GET_MODE_CLASS (mode) != MODE_CC)
1358 return constm1_rtx;
1359 break;
1360
1361 case XOR:
1362 if (trueop1 == const0_rtx)
1363 return op0;
1364 if (GET_CODE (trueop1) == CONST_INT
1365 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1366 == GET_MODE_MASK (mode)))
1367 return gen_rtx_NOT (mode, op0);
1368 if (trueop0 == trueop1 && ! side_effects_p (op0)
1369 && GET_MODE_CLASS (mode) != MODE_CC)
1370 return const0_rtx;
1371 break;
1372
1373 case AND:
1374 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1375 return const0_rtx;
1376 if (GET_CODE (trueop1) == CONST_INT
1377 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1378 == GET_MODE_MASK (mode)))
1379 return op0;
1380 if (trueop0 == trueop1 && ! side_effects_p (op0)
1381 && GET_MODE_CLASS (mode) != MODE_CC)
1382 return op0;
1383 /* A & (~A) -> 0 */
1384 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1385 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1386 && ! side_effects_p (op0)
1387 && GET_MODE_CLASS (mode) != MODE_CC)
1388 return const0_rtx;
1389 break;
1390
1391 case UDIV:
1392 /* Convert divide by power of two into shift (divide by 1 handled
1393 below). */
1394 if (GET_CODE (trueop1) == CONST_INT
1395 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1396 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1397
1398 /* ... fall through ... */
1399
1400 case DIV:
1401 if (trueop1 == CONST1_RTX (mode))
1402 return op0;
1403
1404 /* In IEEE floating point, 0/x is not always 0. */
1405 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1406 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1407 && trueop0 == CONST0_RTX (mode)
1408 && ! side_effects_p (op1))
1409 return op0;
1410
1411 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1412 /* Change division by a constant into multiplication. Only do
1413 this with -funsafe-math-optimizations. */
1414 else if (GET_CODE (trueop1) == CONST_DOUBLE
1415 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1416 && trueop1 != CONST0_RTX (mode)
1417 && flag_unsafe_math_optimizations)
1418 {
1419 REAL_VALUE_TYPE d;
1420 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1421
1422 if (! REAL_VALUES_EQUAL (d, dconst0))
1423 {
1424 #if defined (REAL_ARITHMETIC)
1425 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1426 return gen_rtx_MULT (mode, op0,
1427 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1428 #else
1429 return
1430 gen_rtx_MULT (mode, op0,
1431 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1432 #endif
1433 }
1434 }
1435 #endif
1436 break;
1437
1438 case UMOD:
1439 /* Handle modulus by power of two (mod with 1 handled below). */
1440 if (GET_CODE (trueop1) == CONST_INT
1441 && exact_log2 (INTVAL (trueop1)) > 0)
 1442         return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (trueop1) - 1));
1443
1444 /* ... fall through ... */
1445
1446 case MOD:
1447 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1448 && ! side_effects_p (op0) && ! side_effects_p (op1))
1449 return const0_rtx;
1450 break;
1451
1452 case ROTATERT:
1453 case ROTATE:
1454 /* Rotating ~0 always results in ~0. */
1455 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1456 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1457 && ! side_effects_p (op1))
1458 return op0;
1459
1460 /* ... fall through ... */
1461
1462 case ASHIFT:
1463 case ASHIFTRT:
1464 case LSHIFTRT:
1465 if (trueop1 == const0_rtx)
1466 return op0;
1467 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1468 return op0;
1469 break;
1470
1471 case SMIN:
1472 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
 1473           && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1474 && ! side_effects_p (op0))
1475 return op1;
1476 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1477 return op0;
1478 break;
1479
1480 case SMAX:
1481 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1482 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1483 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1484 && ! side_effects_p (op0))
1485 return op1;
1486 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1487 return op0;
1488 break;
1489
1490 case UMIN:
1491 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1492 return op1;
1493 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1494 return op0;
1495 break;
1496
1497 case UMAX:
1498 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1499 return op1;
1500 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1501 return op0;
1502 break;
1503
1504 default:
1505 abort ();
1506 }
1507
1508 return 0;
1509 }
1510
1511 /* Get the integer argument values in two forms:
1512 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
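  /* Illustrative example: in QImode (width 8), the bit pattern 0xff
     yields arg0 = 255 (zero-extended) and arg0s = -1 (sign-extended).  */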
1513
1514 arg0 = INTVAL (trueop0);
1515 arg1 = INTVAL (trueop1);
1516
1517 if (width < HOST_BITS_PER_WIDE_INT)
1518 {
1519 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1520 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1521
1522 arg0s = arg0;
1523 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1524 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1525
1526 arg1s = arg1;
1527 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1528 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1529 }
1530 else
1531 {
1532 arg0s = arg0;
1533 arg1s = arg1;
1534 }
1535
1536 /* Compute the value of the arithmetic. */
1537
1538 switch (code)
1539 {
1540 case PLUS:
1541 val = arg0s + arg1s;
1542 break;
1543
1544 case MINUS:
1545 val = arg0s - arg1s;
1546 break;
1547
1548 case MULT:
1549 val = arg0s * arg1s;
1550 break;
1551
1552 case DIV:
1553 if (arg1s == 0
1554 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1555 && arg1s == -1))
1556 return 0;
1557 val = arg0s / arg1s;
1558 break;
1559
1560 case MOD:
1561 if (arg1s == 0
1562 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1563 && arg1s == -1))
1564 return 0;
1565 val = arg0s % arg1s;
1566 break;
1567
1568 case UDIV:
1569 if (arg1 == 0
1570 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1571 && arg1s == -1))
1572 return 0;
1573 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1574 break;
1575
1576 case UMOD:
1577 if (arg1 == 0
1578 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1579 && arg1s == -1))
1580 return 0;
1581 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1582 break;
1583
1584 case AND:
1585 val = arg0 & arg1;
1586 break;
1587
1588 case IOR:
1589 val = arg0 | arg1;
1590 break;
1591
1592 case XOR:
1593 val = arg0 ^ arg1;
1594 break;
1595
1596 case LSHIFTRT:
1597 /* If shift count is undefined, don't fold it; let the machine do
1598 what it wants. But truncate it if the machine will do that. */
1599 if (arg1 < 0)
1600 return 0;
1601
1602 #ifdef SHIFT_COUNT_TRUNCATED
1603 if (SHIFT_COUNT_TRUNCATED)
1604 arg1 %= width;
1605 #endif
1606
1607 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1608 break;
1609
1610 case ASHIFT:
1611 if (arg1 < 0)
1612 return 0;
1613
1614 #ifdef SHIFT_COUNT_TRUNCATED
1615 if (SHIFT_COUNT_TRUNCATED)
1616 arg1 %= width;
1617 #endif
1618
1619 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1620 break;
1621
1622 case ASHIFTRT:
1623 if (arg1 < 0)
1624 return 0;
1625
1626 #ifdef SHIFT_COUNT_TRUNCATED
1627 if (SHIFT_COUNT_TRUNCATED)
1628 arg1 %= width;
1629 #endif
1630
1631 val = arg0s >> arg1;
1632
1633 /* Bootstrap compiler may not have sign extended the right shift.
 1634          Manually extend the sign to ensure bootstrap cc matches gcc.  */
1635 if (arg0s < 0 && arg1 > 0)
1636 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1637
1638 break;
1639
1640 case ROTATERT:
1641 if (arg1 < 0)
1642 return 0;
1643
1644 arg1 %= width;
1645 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1646 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1647 break;
1648
1649 case ROTATE:
1650 if (arg1 < 0)
1651 return 0;
1652
1653 arg1 %= width;
1654 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1655 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1656 break;
1657
1658 case COMPARE:
1659 /* Do nothing here. */
1660 return 0;
1661
1662 case SMIN:
1663 val = arg0s <= arg1s ? arg0s : arg1s;
1664 break;
1665
1666 case UMIN:
1667 val = ((unsigned HOST_WIDE_INT) arg0
1668 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1669 break;
1670
1671 case SMAX:
1672 val = arg0s > arg1s ? arg0s : arg1s;
1673 break;
1674
1675 case UMAX:
1676 val = ((unsigned HOST_WIDE_INT) arg0
1677 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1678 break;
1679
1680 default:
1681 abort ();
1682 }
1683
1684 val = trunc_int_for_mode (val, mode);
1685
1686 return GEN_INT (val);
1687 }
1688 \f
1689 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1690 PLUS or MINUS.
1691
 1692    Rather than testing for specific cases, we do this by a brute-force method
1693 and do all possible simplifications until no more changes occur. Then
1694 we rebuild the operation. */
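/* A worked example (illustrative): for (minus (plus A B) B) the
   expansion loop below collects the operands {A, B, -B}; the pairwise
   combination pass then cancels B against -B, leaving just A.  */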
1695
1696 struct simplify_plus_minus_op_data
1697 {
1698 rtx op;
1699 int neg;
1700 };
1701
1702 static int
1703 simplify_plus_minus_op_data_cmp (p1, p2)
1704 const void *p1;
1705 const void *p2;
1706 {
1707 const struct simplify_plus_minus_op_data *d1 = p1;
1708 const struct simplify_plus_minus_op_data *d2 = p2;
1709
1710 return (commutative_operand_precedence (d2->op)
1711 - commutative_operand_precedence (d1->op));
1712 }
1713
1714 static rtx
1715 simplify_plus_minus (code, mode, op0, op1)
1716 enum rtx_code code;
1717 enum machine_mode mode;
1718 rtx op0, op1;
1719 {
1720 struct simplify_plus_minus_op_data ops[8];
1721 rtx result, tem;
1722 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1723 int first, negate, changed;
1724 int i, j;
1725
1726 memset ((char *) ops, 0, sizeof ops);
1727
1728 /* Set up the two operands and then expand them until nothing has been
1729 changed. If we run out of room in our array, give up; this should
1730 almost never happen. */
1731
1732 ops[0].op = op0;
1733 ops[0].neg = 0;
1734 ops[1].op = op1;
1735 ops[1].neg = (code == MINUS);
1736
1737 do
1738 {
1739 changed = 0;
1740
1741 for (i = 0; i < n_ops; i++)
1742 {
1743 rtx this_op = ops[i].op;
1744 int this_neg = ops[i].neg;
1745 enum rtx_code this_code = GET_CODE (this_op);
1746
1747 switch (this_code)
1748 {
1749 case PLUS:
1750 case MINUS:
1751 if (n_ops == 7)
1752 return 0;
1753
1754 ops[n_ops].op = XEXP (this_op, 1);
1755 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1756 n_ops++;
1757
1758 ops[i].op = XEXP (this_op, 0);
1759 input_ops++;
1760 changed = 1;
1761 break;
1762
1763 case NEG:
1764 ops[i].op = XEXP (this_op, 0);
1765 ops[i].neg = ! this_neg;
1766 changed = 1;
1767 break;
1768
1769 case CONST:
1770 ops[i].op = XEXP (this_op, 0);
1771 input_consts++;
1772 changed = 1;
1773 break;
1774
1775 case NOT:
1776 /* ~a -> (-a - 1) */
1777 if (n_ops != 7)
1778 {
1779 ops[n_ops].op = constm1_rtx;
1780 ops[n_ops].neg = this_neg;
1781 ops[i].op = XEXP (this_op, 0);
1782 ops[i].neg = !this_neg;
1783 changed = 1;
1784 }
1785 break;
1786
1787 case CONST_INT:
1788 if (this_neg)
1789 {
1790 ops[i].op = GEN_INT (- INTVAL (this_op));
1791 ops[i].neg = 0;
1792 changed = 1;
1793 }
1794 break;
1795
1796 default:
1797 break;
1798 }
1799 }
1800 }
1801 while (changed);
1802
1803 /* If we only have two operands, we can't do anything. */
1804 if (n_ops <= 2)
1805 return NULL_RTX;
1806
1807 /* Now simplify each pair of operands until nothing changes. The first
1808 time through just simplify constants against each other. */
1809
1810 first = 1;
1811 do
1812 {
1813 changed = first;
1814
1815 for (i = 0; i < n_ops - 1; i++)
1816 for (j = i + 1; j < n_ops; j++)
1817 {
1818 rtx lhs = ops[i].op, rhs = ops[j].op;
1819 int lneg = ops[i].neg, rneg = ops[j].neg;
1820
1821 if (lhs != 0 && rhs != 0
1822 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1823 {
1824 enum rtx_code ncode = PLUS;
1825
1826 if (lneg != rneg)
1827 {
1828 ncode = MINUS;
1829 if (lneg)
1830 tem = lhs, lhs = rhs, rhs = tem;
1831 }
1832 else if (swap_commutative_operands_p (lhs, rhs))
1833 tem = lhs, lhs = rhs, rhs = tem;
1834
1835 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1836
1837 /* Reject "simplifications" that just wrap the two
1838 arguments in a CONST. Failure to do so can result
1839 in infinite recursion with simplify_binary_operation
1840 when it calls us to simplify CONST operations. */
1841 if (tem
1842 && ! (GET_CODE (tem) == CONST
1843 && GET_CODE (XEXP (tem, 0)) == ncode
1844 && XEXP (XEXP (tem, 0), 0) == lhs
1845 && XEXP (XEXP (tem, 0), 1) == rhs))
1846 {
1847 lneg &= rneg;
1848 if (GET_CODE (tem) == NEG)
1849 tem = XEXP (tem, 0), lneg = !lneg;
1850 if (GET_CODE (tem) == CONST_INT && lneg)
1851 tem = GEN_INT (- INTVAL (tem)), lneg = 0;
1852
1853 ops[i].op = tem;
1854 ops[i].neg = lneg;
1855 ops[j].op = NULL_RTX;
1856 changed = 1;
1857 }
1858 }
1859 }
1860
1861 first = 0;
1862 }
1863 while (changed);
1864
1865 /* Pack all the operands to the lower-numbered entries. */
1866 for (i = 0, j = 0; j < n_ops; j++)
1867 if (ops[j].op)
1868 ops[i++] = ops[j];
1869 n_ops = i;
1870
1871 /* Sort the operations based on swap_commutative_operands_p. */
1872 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1873
1874 /* We suppressed creation of trivial CONST expressions in the
1875 combination loop to avoid recursion. Create one manually now.
1876 The combination loop should have ensured that there is exactly
1877 one CONST_INT, and the sort will have ensured that it is last
1878 in the array and that any other constant will be next-to-last. */
1879
1880 if (n_ops > 1
1881 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1882 && CONSTANT_P (ops[n_ops - 2].op))
1883 {
1884 HOST_WIDE_INT value = INTVAL (ops[n_ops - 1].op);
1885 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1886 value = -value;
1887 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, value);
1888 n_ops--;
1889 }
1890
1891 /* Count the number of CONSTs that we generated. */
1892 n_consts = 0;
1893 for (i = 0; i < n_ops; i++)
1894 if (GET_CODE (ops[i].op) == CONST)
1895 n_consts++;
1896
1897 /* Give up if we didn't reduce the number of operands we had. Make
1898 sure we count a CONST as two operands. If we have the same
1899 number of operands, but have made more CONSTs than before, this
1900 is also an improvement, so accept it. */
1901 if (n_ops + n_consts > input_ops
1902 || (n_ops + n_consts == input_ops && n_consts <= input_consts))
1903 return NULL_RTX;
1904
1905 /* Put a non-negated operand first. If there aren't any, make all
1906 operands positive and negate the whole thing later. */
1907
1908 negate = 0;
1909 for (i = 0; i < n_ops && ops[i].neg; i++)
1910 continue;
1911 if (i == n_ops)
1912 {
1913 for (i = 0; i < n_ops; i++)
1914 ops[i].neg = 0;
1915 negate = 1;
1916 }
1917 else if (i != 0)
1918 {
1919 tem = ops[0].op;
1920 ops[0] = ops[i];
1921 ops[i].op = tem;
1922 ops[i].neg = 1;
1923 }
1924
1925 /* Now make the result by performing the requested operations. */
1926 result = ops[0].op;
1927 for (i = 1; i < n_ops; i++)
1928 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1929 mode, result, ops[i].op);
1930
1931 return negate ? gen_rtx_NEG (mode, result) : result;
1932 }
1933
1934 struct cfc_args
1935 {
1936 rtx op0, op1; /* Input */
1937 int equal, op0lt, op1lt; /* Output */
1938 int unordered;
1939 };
1940
1941 static void
1942 check_fold_consts (data)
1943 PTR data;
1944 {
1945 struct cfc_args *args = (struct cfc_args *) data;
1946 REAL_VALUE_TYPE d0, d1;
1947
 1948   /* We may raise an exception while reading the value.  */
1949 args->unordered = 1;
1950 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1951 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1952
 1953   /* Comparisons of Inf versus Inf are ordered; only NaNs are unordered.  */
1954 if (REAL_VALUE_ISNAN (d0)
1955 || REAL_VALUE_ISNAN (d1))
1956 return;
1957 args->equal = REAL_VALUES_EQUAL (d0, d1);
1958 args->op0lt = REAL_VALUES_LESS (d0, d1);
1959 args->op1lt = REAL_VALUES_LESS (d1, d0);
1960 args->unordered = 0;
1961 }
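
/* Example behavior (illustrative): comparing 1.0 with a NaN leaves
   args->unordered set, so the caller treats UNORDERED and NE style
   tests as true and ORDERED and EQ style tests as false; comparing
   1.0 with 2.0 clears it and sets op0lt.  */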
1962
1963 /* Like simplify_binary_operation except used for relational operators.
1964 MODE is the mode of the operands, not that of the result. If MODE
1965 is VOIDmode, both operands must also be VOIDmode and we compare the
1966 operands in "infinite precision".
1967
1968 If no simplification is possible, this function returns zero. Otherwise,
1969 it returns either const_true_rtx or const0_rtx. */
1970
1971 rtx
1972 simplify_relational_operation (code, mode, op0, op1)
1973 enum rtx_code code;
1974 enum machine_mode mode;
1975 rtx op0, op1;
1976 {
1977 int equal, op0lt, op0ltu, op1lt, op1ltu;
1978 rtx tem;
1979 rtx trueop0;
1980 rtx trueop1;
1981
1982 if (mode == VOIDmode
1983 && (GET_MODE (op0) != VOIDmode
1984 || GET_MODE (op1) != VOIDmode))
1985 abort ();
1986
1987 /* If op0 is a compare, extract the comparison arguments from it. */
1988 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1989 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1990
1991 trueop0 = avoid_constant_pool_reference (op0);
1992 trueop1 = avoid_constant_pool_reference (op1);
1993
1994 /* We can't simplify MODE_CC values since we don't know what the
1995 actual comparison is. */
1996 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1997 #ifdef HAVE_cc0
1998 || op0 == cc0_rtx
1999 #endif
2000 )
2001 return 0;
2002
2003 /* Make sure the constant is second. */
2004 if (swap_commutative_operands_p (trueop0, trueop1))
2005 {
2006 tem = op0, op0 = op1, op1 = tem;
2007 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2008 code = swap_condition (code);
2009 }
2010
2011 /* For integer comparisons of A and B maybe we can simplify A - B and can
2012 then simplify a comparison of that with zero. If A and B are both either
2013 a register or a CONST_INT, this can't help; testing for these cases will
2014 prevent infinite recursion here and speed things up.
2015
2016 If CODE is an unsigned comparison, then we can never do this optimization,
2017 because it gives an incorrect result if the subtraction wraps around zero.
2018 ANSI C defines unsigned operations such that they never overflow, and
2019 thus such cases cannot be ignored. */
2020
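/* E.g., a signed comparison of (plus X (const_int 3)) with
   (plus X (const_int 1)) reduces to comparing (const_int 2)
   against zero.  */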
2021 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2022 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2023 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2024 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2025 && code != GTU && code != GEU && code != LTU && code != LEU)
2026 return simplify_relational_operation (signed_condition (code),
2027 mode, tem, const0_rtx);
2028
2029 if (flag_unsafe_math_optimizations && code == ORDERED)
2030 return const_true_rtx;
2031
2032 if (flag_unsafe_math_optimizations && code == UNORDERED)
2033 return const0_rtx;
2034
2035 /* For non-IEEE floating-point, if the two operands are equal, we know the
2036 result. */
2037 if (rtx_equal_p (trueop0, trueop1)
2038 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2039 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2040 || flag_unsafe_math_optimizations))
2041 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2042
2043 /* If the operands are floating-point constants, see if we can fold
2044 the result. */
2045 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2046 else if (GET_CODE (trueop0) == CONST_DOUBLE
2047 && GET_CODE (trueop1) == CONST_DOUBLE
2048 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2049 {
2050 struct cfc_args args;
2051
2052 /* Set up the input for check_fold_consts. */
2053 args.op0 = trueop0;
2054 args.op1 = trueop1;
2055
2057 if (!do_float_handler (check_fold_consts, (PTR) &args))
2058 args.unordered = 1;
2059
2060 if (args.unordered)
2061 switch (code)
2062 {
2063 case UNEQ:
2064 case UNLT:
2065 case UNGT:
2066 case UNLE:
2067 case UNGE:
2068 case NE:
2069 case UNORDERED:
2070 return const_true_rtx;
2071 case EQ:
2072 case LT:
2073 case GT:
2074 case LE:
2075 case GE:
2076 case LTGT:
2077 case ORDERED:
2078 return const0_rtx;
2079 default:
2080 return 0;
2081 }
2082
2083 /* Receive output from check_fold_consts() */
2084 equal = args.equal;
2085 op0lt = op0ltu = args.op0lt;
2086 op1lt = op1ltu = args.op1lt;
2087 }
2088 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2089
2090 /* Otherwise, see if the operands are both integers. */
2091 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2092 && (GET_CODE (trueop0) == CONST_DOUBLE
2093 || GET_CODE (trueop0) == CONST_INT)
2094 && (GET_CODE (trueop1) == CONST_DOUBLE
2095 || GET_CODE (trueop1) == CONST_INT))
2096 {
2097 int width = GET_MODE_BITSIZE (mode);
2098 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2099 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2100
2101 /* Get the two words comprising each integer constant. */
2102 if (GET_CODE (trueop0) == CONST_DOUBLE)
2103 {
2104 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2105 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2106 }
2107 else
2108 {
2109 l0u = l0s = INTVAL (trueop0);
2110 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2111 }
2112
2113 if (GET_CODE (trueop1) == CONST_DOUBLE)
2114 {
2115 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2116 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2117 }
2118 else
2119 {
2120 l1u = l1s = INTVAL (trueop1);
2121 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2122 }
2123
2124 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2125 we have to sign or zero-extend the values. */
2126 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2127 {
2128 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2129 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2130
2131 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2132 l0s |= ((HOST_WIDE_INT) (-1) << width);
2133
2134 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2135 l1s |= ((HOST_WIDE_INT) (-1) << width);
2136 }
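/* E.g., with width == 8, a low word of 0xff is 255 for the unsigned
   comparisons (l0u) but -1 for the signed ones (l0s).  */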
2137 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2138 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2139
2140 equal = (h0u == h1u && l0u == l1u);
2141 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2142 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2143 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2144 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2145 }
2146
2147 /* Otherwise, there are some code-specific tests we can make. */
2148 else
2149 {
2150 switch (code)
2151 {
2152 case EQ:
2153 /* References to the frame plus a constant or labels cannot
2154 be zero, but a SYMBOL_REF can due to #pragma weak. */
2155 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2156 || GET_CODE (trueop0) == LABEL_REF)
2157 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2158 /* On some machines, the ap reg can be 0 sometimes. */
2159 && op0 != arg_pointer_rtx
2160 #endif
2161 )
2162 return const0_rtx;
2163 break;
2164
2165 case NE:
2166 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2167 || GET_CODE (trueop0) == LABEL_REF)
2168 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2169 && op0 != arg_pointer_rtx
2170 #endif
2171 )
2172 return const_true_rtx;
2173 break;
2174
2175 case GEU:
2176 /* Unsigned values are never negative. */
2177 if (trueop1 == const0_rtx)
2178 return const_true_rtx;
2179 break;
2180
2181 case LTU:
2182 if (trueop1 == const0_rtx)
2183 return const0_rtx;
2184 break;
2185
2186 case LEU:
2187 /* Unsigned values are never greater than the largest
2188 unsigned value. */
2189 if (GET_CODE (trueop1) == CONST_INT
2190 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2191 && INTEGRAL_MODE_P (mode))
2192 return const_true_rtx;
2193 break;
2194
2195 case GTU:
2196 if (GET_CODE (trueop1) == CONST_INT
2197 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2198 && INTEGRAL_MODE_P (mode))
2199 return const0_rtx;
2200 break;
2201
2202 default:
2203 break;
2204 }
2205
2206 return 0;
2207 }
2208
2209 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2210 as appropriate. */
2211 switch (code)
2212 {
2213 case EQ:
2214 case UNEQ:
2215 return equal ? const_true_rtx : const0_rtx;
2216 case NE:
2217 case LTGT:
2218 return ! equal ? const_true_rtx : const0_rtx;
2219 case LT:
2220 case UNLT:
2221 return op0lt ? const_true_rtx : const0_rtx;
2222 case GT:
2223 case UNGT:
2224 return op1lt ? const_true_rtx : const0_rtx;
2225 case LTU:
2226 return op0ltu ? const_true_rtx : const0_rtx;
2227 case GTU:
2228 return op1ltu ? const_true_rtx : const0_rtx;
2229 case LE:
2230 case UNLE:
2231 return equal || op0lt ? const_true_rtx : const0_rtx;
2232 case GE:
2233 case UNGE:
2234 return equal || op1lt ? const_true_rtx : const0_rtx;
2235 case LEU:
2236 return equal || op0ltu ? const_true_rtx : const0_rtx;
2237 case GEU:
2238 return equal || op1ltu ? const_true_rtx : const0_rtx;
2239 case ORDERED:
2240 return const_true_rtx;
2241 case UNORDERED:
2242 return const0_rtx;
2243 default:
2244 abort ();
2245 }
2246 }
2247 \f
2248 /* Simplify CODE, an operation with result mode MODE and three operands,
2249 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2250 a constant. Return 0 if no simplification is possible. */
2251
2252 rtx
2253 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2254 enum rtx_code code;
2255 enum machine_mode mode, op0_mode;
2256 rtx op0, op1, op2;
2257 {
2258 unsigned int width = GET_MODE_BITSIZE (mode);
2259
2260 /* VOIDmode means "infinite" precision. */
2261 if (width == 0)
2262 width = HOST_BITS_PER_WIDE_INT;
2263
2264 switch (code)
2265 {
2266 case SIGN_EXTRACT:
2267 case ZERO_EXTRACT:
2268 if (GET_CODE (op0) == CONST_INT
2269 && GET_CODE (op1) == CONST_INT
2270 && GET_CODE (op2) == CONST_INT
2271 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2272 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2273 {
2274 /* Extracting a bit-field from a constant */
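/* E.g., with !BITS_BIG_ENDIAN, (zero_extract (const_int 0x1234)
   (const_int 8) (const_int 4)) is (const_int 0x23): shift right by
   the bit position, then mask to the field width.  SIGN_EXTRACT
   would additionally propagate the field's top bit upward.  */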
2275 HOST_WIDE_INT val = INTVAL (op0);
2276
2277 if (BITS_BIG_ENDIAN)
2278 val >>= (GET_MODE_BITSIZE (op0_mode)
2279 - INTVAL (op2) - INTVAL (op1));
2280 else
2281 val >>= INTVAL (op2);
2282
2283 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2284 {
2285 /* First zero-extend. */
2286 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2287 /* If desired, propagate sign bit. */
2288 if (code == SIGN_EXTRACT
2289 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2290 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2291 }
2292
2293 /* Clear the bits that don't belong in our mode,
2294 unless they and our sign bit are all one.
2295 So we get either a reasonable negative value or a reasonable
2296 unsigned value for this mode. */
2297 if (width < HOST_BITS_PER_WIDE_INT
2298 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2299 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2300 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2301
2302 return GEN_INT (val);
2303 }
2304 break;
2305
2306 case IF_THEN_ELSE:
2307 if (GET_CODE (op0) == CONST_INT)
2308 return op0 != const0_rtx ? op1 : op2;
2309
2310 /* Convert a == b ? b : a to "a". */
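/* E.g., (if_then_else (ne A B) A B) and (if_then_else (eq A B) B A)
   both reduce to plain A, provided the comparison has no side
   effects.  */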
2311 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2312 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2313 && rtx_equal_p (XEXP (op0, 0), op1)
2314 && rtx_equal_p (XEXP (op0, 1), op2))
2315 return op1;
2316 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2317 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2318 && rtx_equal_p (XEXP (op0, 1), op1)
2319 && rtx_equal_p (XEXP (op0, 0), op2))
2320 return op2;
2321 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2322 {
2323 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2324 ? GET_MODE (XEXP (op0, 1))
2325 : GET_MODE (XEXP (op0, 0)));
2326 rtx temp;
2327 if (cmp_mode == VOIDmode)
2328 cmp_mode = op0_mode;
2329 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2330 XEXP (op0, 0), XEXP (op0, 1));
2331
2332 /* See if any simplifications were possible. */
2333 if (temp == const0_rtx)
2334 return op2;
2335 else if (temp == const1_rtx)
2336 return op1;
2337 else if (temp)
2338 op0 = temp;
2339
2340 /* Look for the store-flag constants in op1 and op2: if found, the result is just the comparison itself, possibly reversed. */
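/* E.g., with STORE_FLAG_VALUE == 1,
   (if_then_else (lt X Y) (const_int 1) (const_int 0)) becomes
   (lt X Y), and the arms swapped yields the reversed comparison
   (ge X Y) when that reversal is known to be safe.  */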
2341 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2342 {
2343 HOST_WIDE_INT t = INTVAL (op1);
2344 HOST_WIDE_INT f = INTVAL (op2);
2345
2346 if (t == STORE_FLAG_VALUE && f == 0)
2347 code = GET_CODE (op0);
2348 else if (t == 0 && f == STORE_FLAG_VALUE)
2349 {
2350 enum rtx_code tmp;
2351 tmp = reversed_comparison_code (op0, NULL_RTX);
2352 if (tmp == UNKNOWN)
2353 break;
2354 code = tmp;
2355 }
2356 else
2357 break;
2358
2359 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2360 }
2361 }
2362 break;
2363
2364 default:
2365 abort ();
2366 }
2367
2368 return 0;
2369 }
2370
2371 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2372 Return 0 if no simplification is possible. */
2373 rtx
2374 simplify_subreg (outermode, op, innermode, byte)
2375 rtx op;
2376 unsigned int byte;
2377 enum machine_mode outermode, innermode;
2378 {
2379 /* Little bit of sanity checking. */
2380 if (innermode == VOIDmode || outermode == VOIDmode
2381 || innermode == BLKmode || outermode == BLKmode)
2382 abort ();
2383
2384 if (GET_MODE (op) != innermode
2385 && GET_MODE (op) != VOIDmode)
2386 abort ();
2387
2388 if (byte % GET_MODE_SIZE (outermode)
2389 || byte >= GET_MODE_SIZE (innermode))
2390 abort ();
2391
2392 if (outermode == innermode && !byte)
2393 return op;
2394
2395 /* Attempt to simplify constant to non-SUBREG expression. */
2396 if (CONSTANT_P (op))
2397 {
2398 int offset, part;
2399 unsigned HOST_WIDE_INT val = 0;
2400
2401 /* ??? This code is partly redundant with code below, but can handle
2402 the subregs of floats and similar corner cases.
2403 Later we should move all simplification code here and rewrite
2404 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2405 using SIMPLIFY_SUBREG. */
2406 if (subreg_lowpart_offset (outermode, innermode) == byte)
2407 {
2408 rtx new = gen_lowpart_if_possible (outermode, op);
2409 if (new)
2410 return new;
2411 }
2412
2413 /* The same comment as above applies here. */
2414 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2415 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2416 && GET_MODE_CLASS (outermode) == MODE_INT)
2417 {
2418 rtx new = constant_subword (op,
2419 (byte / UNITS_PER_WORD),
2420 innermode);
2421 if (new)
2422 return new;
2423 }
2424
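/* E.g., assuming a little-endian target and 32-bit HOST_WIDE_INT,
   the HImode subreg at byte 4 of an integer CONST_DOUBLE is the
   low 16 bits of CONST_DOUBLE_HIGH.  */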
2425 offset = byte * BITS_PER_UNIT;
2426 switch (GET_CODE (op))
2427 {
2428 case CONST_DOUBLE:
2429 if (GET_MODE (op) != VOIDmode)
2430 break;
2431
2432 /* We can't handle this case yet. */
2433 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2434 return NULL_RTX;
2435
2436 part = offset >= HOST_BITS_PER_WIDE_INT;
2437 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2438 && BYTES_BIG_ENDIAN)
2439 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2440 && WORDS_BIG_ENDIAN))
2441 part = !part;
2442 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2443 offset %= HOST_BITS_PER_WIDE_INT;
2444
2445 /* We've already picked the word we want from a double, so
2446 pretend this is actually an integer. */
2447 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2448
2449 /* FALLTHROUGH */
2450 case CONST_INT:
2451 if (GET_CODE (op) == CONST_INT)
2452 val = INTVAL (op);
2453
2454 /* We don't handle synthesizing non-integral constants yet. */
2455 if (GET_MODE_CLASS (outermode) != MODE_INT)
2456 return NULL_RTX;
2457
2458 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2459 {
2460 if (WORDS_BIG_ENDIAN)
2461 offset = (GET_MODE_BITSIZE (innermode)
2462 - GET_MODE_BITSIZE (outermode) - offset);
2463 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2464 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2465 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2466 - 2 * (offset % BITS_PER_WORD));
2467 }
2468
2469 if (offset >= HOST_BITS_PER_WIDE_INT)
2470 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2471 else
2472 {
2473 val >>= offset;
2474 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2475 val = trunc_int_for_mode (val, outermode);
2476 return GEN_INT (val);
2477 }
2478 default:
2479 break;
2480 }
2481 }
2482
2483 /* Changing mode twice with SUBREG => just change it once,
2484 or not at all if changing back to the starting mode of OP. */
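/* E.g., (subreg:QI (subreg:HI (reg:SI R) 0) 0) folds to
   (subreg:QI (reg:SI R) 0) when R is a pseudo register.  */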
2485 if (GET_CODE (op) == SUBREG)
2486 {
2487 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2488 int final_offset = byte + SUBREG_BYTE (op);
2489 rtx new;
2490
2491 if (outermode == innermostmode
2492 && byte == 0 && SUBREG_BYTE (op) == 0)
2493 return SUBREG_REG (op);
2494
2495 /* The SUBREG_BYTE represents the offset, as if the value were stored
2496 in memory. An irritating exception is the paradoxical subreg, where
2497 we define SUBREG_BYTE to be 0. On big-endian machines, this value
2498 should really be negative. For a moment, undo this exception. */
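/* E.g., for (subreg:DI (reg:SI) 0) with 4-byte words and
   WORDS_BIG_ENDIAN, DIFFERENCE is -4 below and FINAL_OFFSET
   temporarily becomes -4.  */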
2499 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2500 {
2501 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2502 if (WORDS_BIG_ENDIAN)
2503 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2504 if (BYTES_BIG_ENDIAN)
2505 final_offset += difference % UNITS_PER_WORD;
2506 }
2507 if (SUBREG_BYTE (op) == 0
2508 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2509 {
2510 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2511 if (WORDS_BIG_ENDIAN)
2512 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2513 if (BYTES_BIG_ENDIAN)
2514 final_offset += difference % UNITS_PER_WORD;
2515 }
2516
2517 /* See whether resulting subreg will be paradoxical. */
2518 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2519 {
2520 /* In nonparadoxical subregs we can't handle negative offsets. */
2521 if (final_offset < 0)
2522 return NULL_RTX;
2523 /* Bail out in case resulting subreg would be incorrect. */
2524 if (final_offset % GET_MODE_SIZE (outermode)
2525 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2526 return NULL_RTX;
2527 }
2528 else
2529 {
2530 int offset = 0;
2531 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2532
2533 /* In a paradoxical subreg, see if we are still looking at the lower
2534 part. If so, our SUBREG_BYTE will be 0. */
2535 if (WORDS_BIG_ENDIAN)
2536 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2537 if (BYTES_BIG_ENDIAN)
2538 offset += difference % UNITS_PER_WORD;
2539 if (offset == final_offset)
2540 final_offset = 0;
2541 else
2542 return NULL_RTX;
2543 }
2544
2545 /* Recurse for further possible simplifications. */
2546 new = simplify_subreg (outermode, SUBREG_REG (op),
2547 GET_MODE (SUBREG_REG (op)),
2548 final_offset);
2549 if (new)
2550 return new;
2551 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2552 }
2553
2554 /* SUBREG of a hard register => just change the register number
2555 and/or mode. If the hard register is not valid in that mode,
2556 suppress this simplification. If the hard register is the stack,
2557 frame, or argument pointer, leave this as a SUBREG. */
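/* E.g., on a little-endian machine with 4-byte words,
   (subreg:SI (reg:DI 0) 4) can fold to (reg:SI 1), provided hard
   register 1 can hold SImode.  */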
2558
2559 if (REG_P (op)
2560 && (! REG_FUNCTION_VALUE_P (op)
2561 || ! rtx_equal_function_value_matters)
2562 #ifdef CLASS_CANNOT_CHANGE_MODE
2563 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2564 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2565 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2566 && (TEST_HARD_REG_BIT
2567 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2568 REGNO (op))))
2569 #endif
2570 && REGNO (op) < FIRST_PSEUDO_REGISTER
2571 && ((reload_completed && !frame_pointer_needed)
2572 || (REGNO (op) != FRAME_POINTER_REGNUM
2573 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2574 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2575 #endif
2576 ))
2577 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2578 && REGNO (op) != ARG_POINTER_REGNUM
2579 #endif
2580 && REGNO (op) != STACK_POINTER_REGNUM)
2581 {
2582 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2583 0);
2584
2585 /* ??? We do allow it if the current REG is not valid for
2586 its mode. This is a kludge to work around how float/complex
2587 arguments are passed on 32-bit Sparc and should be fixed. */
2588 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2589 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2590 {
2591 rtx x = gen_rtx_REG (outermode, final_regno);
2592
2593 /* Propagate the original regno. We don't have any way to specify
2594 the offset inside the original regno, so do so only for the lowpart.
2595 The information is used only by alias analysis, which cannot
2596 grok partial registers anyway. */
2597
2598 if (subreg_lowpart_offset (outermode, innermode) == byte)
2599 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2600 return x;
2601 }
2602 }
2603
2604 /* If we have a SUBREG of a register that we are replacing and we are
2605 replacing it with a MEM, make a new MEM and try replacing the
2606 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2607 or if we would be widening it. */
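/* E.g., (subreg:QI (mem:SI ADDR) 3) becomes
   (mem:QI (plus ADDR (const_int 3))) via adjust_address_nv,
   provided ADDR is not mode-dependent.  */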
2608
2609 if (GET_CODE (op) == MEM
2610 && ! mode_dependent_address_p (XEXP (op, 0))
2611 /* Allow splitting of volatile memory references in case we don't
2612 have an instruction to move the whole thing. */
2613 && (! MEM_VOLATILE_P (op)
2614 || ! have_insn_for (SET, innermode))
2615 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2616 return adjust_address_nv (op, outermode, byte);
2617
2618 /* Handle complex values represented as CONCAT
2619 of real and imaginary part. */
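/* E.g., (subreg:SF (concat:SC RE IM) 4) is simply IM, since the
   imaginary part starts at byte GET_MODE_UNIT_SIZE (SCmode) == 4.  */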
2620 if (GET_CODE (op) == CONCAT)
2621 {
2622 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2623 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2624 unsigned int final_offset;
2625 rtx res;
2626
2627 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2628 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2629 if (res)
2630 return res;
2631 /* We can at least simplify it by referring directly to the relevant part. */
2632 return gen_rtx_SUBREG (outermode, part, final_offset);
2633 }
2634
2635 return NULL_RTX;
2636 }
2637 /* Make a SUBREG operation or equivalent if it folds. */
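/* Unlike simplify_subreg, this returns a fresh SUBREG when no fold
   applies; it returns NULL_RTX only for QUEUED or VOIDmode operands,
   or for a SUBREG operand that could not be folded.  */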
2638
2639 rtx
2640 simplify_gen_subreg (outermode, op, innermode, byte)
2641 rtx op;
2642 unsigned int byte;
2643 enum machine_mode outermode, innermode;
2644 {
2645 rtx new;
2646 /* Little bit of sanity checking. */
2647 if (innermode == VOIDmode || outermode == VOIDmode
2648 || innermode == BLKmode || outermode == BLKmode)
2649 abort ();
2650
2651 if (GET_MODE (op) != innermode
2652 && GET_MODE (op) != VOIDmode)
2653 abort ();
2654
2655 if (byte % GET_MODE_SIZE (outermode)
2656 || byte >= GET_MODE_SIZE (innermode))
2657 abort ();
2658
2659 if (GET_CODE (op) == QUEUED)
2660 return NULL_RTX;
2661
2662 new = simplify_subreg (outermode, op, innermode, byte);
2663 if (new)
2664 return new;
2665
2666 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2667 return NULL_RTX;
2668
2669 return gen_rtx_SUBREG (outermode, op, byte);
2670 }
2671 /* Simplify X, an rtx expression.
2672
2673 Return the simplified expression or NULL if no simplifications
2674 were possible.
2675
2676 This is the preferred entry point into the simplification routines;
2677 however, we still allow passes to call the more specific routines.
2678
2679 Right now GCC has three (yes, three) major bodies of RTL simplification
2680 code that need to be unified.
2681
2682 1. fold_rtx in cse.c. This code uses various CSE specific
2683 information to aid in RTL simplification.
2684
2685 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2686 it uses combine specific information to aid in RTL
2687 simplification.
2688
2689 3. The routines in this file.
2690
2691
2692 Long term we want to only have one body of simplification code; to
2693 get to that state I recommend the following steps:
2694
2695 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2696 which do not depend on pass-specific state into these routines.
2697
2698 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2699 use this routine whenever possible.
2700
2701 3. Allow for pass-dependent state to be provided to these
2702 routines and add simplifications based on the pass-dependent
2703 state. Remove code from cse.c & combine.c that becomes
2704 redundant/dead.
2705
2706 It will take time, but ultimately the compiler will be easier to
2707 maintain and improve. It's totally silly that when we add a
2708 simplification it needs to be added to 4 places (3 for RTL
2709 simplification and 1 for tree simplification). */
2710
2711 rtx
2712 simplify_rtx (x)
2713 rtx x;
2714 {
2715 enum rtx_code code = GET_CODE (x);
2716 enum machine_mode mode = GET_MODE (x);
2717
2718 switch (GET_RTX_CLASS (code))
2719 {
2720 case '1':
2721 return simplify_unary_operation (code, mode,
2722 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2723 case 'c':
2724 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2725 {
2726 rtx tem;
2727
2728 tem = XEXP (x, 0);
2729 XEXP (x, 0) = XEXP (x, 1);
2730 XEXP (x, 1) = tem;
2731 return simplify_binary_operation (code, mode,
2732 XEXP (x, 0), XEXP (x, 1));
2733 }
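      /* Fall through to the generic binary-op case.  */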
2734
2735 case '2':
2736 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2737
2738 case '3':
2739 case 'b':
2740 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2741 XEXP (x, 0), XEXP (x, 1),
2742 XEXP (x, 2));
2743
2744 case '<':
2745 return simplify_relational_operation (code,
2746 ((GET_MODE (XEXP (x, 0))
2747 != VOIDmode)
2748 ? GET_MODE (XEXP (x, 0))
2749 : GET_MODE (XEXP (x, 1))),
2750 XEXP (x, 0), XEXP (x, 1));
2751 case 'x':
2752 /* The only case we try to handle is a SUBREG. */
2753 if (code == SUBREG)
2754 return simplify_gen_subreg (mode, SUBREG_REG (x),
2755 GET_MODE (SUBREG_REG (x)),
2756 SUBREG_BYTE (x));
2757 return NULL;
2758 default:
2759 return NULL;
2760 }
2761 }