1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
 17 along with GCC; see the file COPYING3.  If not, see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "hashtab.h"
35 #include "hash-set.h"
36 #include "vec.h"
37 #include "machmode.h"
38 #include "input.h"
39 #include "function.h"
40 #include "insn-codes.h"
41 #include "optabs.h"
42 #include "expr.h"
43 #include "diagnostic-core.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "predict.h"
47
48 /* Simplification and canonicalization of RTL. */
49
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
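/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (0x8000000000000000)
   is (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND (5) is 0, i.e. the value the high
   half of the pair must take to sign extend the low half.  */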
56
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
60 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
61 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
62 unsigned int);
63 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
64 rtx, rtx);
65 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
66 machine_mode, rtx, rtx);
67 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
68 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
69 rtx, rtx, rtx, rtx);
70 \f
71 /* Negate a CONST_INT rtx, truncating (because a conversion from a
72 maximally negative number can overflow). */
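/* For instance, in QImode the most negative value is -128; its mathematical
   negation, 128, does not fit, so gen_int_mode wraps the result back to
   (const_int -128).  */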
73 static rtx
74 neg_const_int (machine_mode mode, const_rtx i)
75 {
76 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
77 }
78
79 /* Test whether expression, X, is an immediate constant that represents
80 the most significant bit of machine mode MODE. */
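/* For instance, in SImode this holds for (const_int -2147483648), whose
   value masked to 32 bits has only bit 31 set.  */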
81
82 bool
83 mode_signbit_p (machine_mode mode, const_rtx x)
84 {
85 unsigned HOST_WIDE_INT val;
86 unsigned int width;
87
88 if (GET_MODE_CLASS (mode) != MODE_INT)
89 return false;
90
91 width = GET_MODE_PRECISION (mode);
92 if (width == 0)
93 return false;
94
95 if (width <= HOST_BITS_PER_WIDE_INT
96 && CONST_INT_P (x))
97 val = INTVAL (x);
98 #if TARGET_SUPPORTS_WIDE_INT
99 else if (CONST_WIDE_INT_P (x))
100 {
101 unsigned int i;
102 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
103 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
104 return false;
105 for (i = 0; i < elts - 1; i++)
106 if (CONST_WIDE_INT_ELT (x, i) != 0)
107 return false;
108 val = CONST_WIDE_INT_ELT (x, elts - 1);
109 width %= HOST_BITS_PER_WIDE_INT;
110 if (width == 0)
111 width = HOST_BITS_PER_WIDE_INT;
112 }
113 #else
114 else if (width <= HOST_BITS_PER_DOUBLE_INT
115 && CONST_DOUBLE_AS_INT_P (x)
116 && CONST_DOUBLE_LOW (x) == 0)
117 {
118 val = CONST_DOUBLE_HIGH (x);
119 width -= HOST_BITS_PER_WIDE_INT;
120 }
121 #endif
122 else
123 /* X is not an integer constant. */
124 return false;
125
126 if (width < HOST_BITS_PER_WIDE_INT)
127 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
128 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
129 }
130
131 /* Test whether VAL is equal to the most significant bit of mode MODE
132 (after masking with the mode mask of MODE). Returns false if the
133 precision of MODE is too large to handle. */
134
135 bool
136 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 {
138 unsigned int width;
139
140 if (GET_MODE_CLASS (mode) != MODE_INT)
141 return false;
142
143 width = GET_MODE_PRECISION (mode);
144 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
145 return false;
146
147 val &= GET_MODE_MASK (mode);
148 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
149 }
150
151 /* Test whether the most significant bit of mode MODE is set in VAL.
152 Returns false if the precision of MODE is too large to handle. */
153 bool
154 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 {
156 unsigned int width;
157
158 if (GET_MODE_CLASS (mode) != MODE_INT)
159 return false;
160
161 width = GET_MODE_PRECISION (mode);
162 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
163 return false;
164
165 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
166 return val != 0;
167 }
168
169 /* Test whether the most significant bit of mode MODE is clear in VAL.
170 Returns false if the precision of MODE is too large to handle. */
171 bool
172 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 {
174 unsigned int width;
175
176 if (GET_MODE_CLASS (mode) != MODE_INT)
177 return false;
178
179 width = GET_MODE_PRECISION (mode);
180 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
181 return false;
182
183 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
184 return val == 0;
185 }
186 \f
187 /* Make a binary operation by properly ordering the operands and
188 seeing if the expression folds. */
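/* Illustrative example: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X, while a pair that does not fold comes back as a fresh
   (plus:SI ...) with any constant operand placed second.  */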
189
190 rtx
191 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
192 rtx op1)
193 {
194 rtx tem;
195
196 /* If this simplifies, do it. */
197 tem = simplify_binary_operation (code, mode, op0, op1);
198 if (tem)
199 return tem;
200
201 /* Put complex operands first and constants second if commutative. */
202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
203 && swap_commutative_operands_p (op0, op1))
204 tem = op0, op0 = op1, op1 = tem;
205
206 return gen_rtx_fmt_ee (code, mode, op0, op1);
207 }
208 \f
209 /* If X is a MEM referencing the constant pool, return the real value.
210 Otherwise return X. */
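/* For example, a MEM:SF whose address is a SYMBOL_REF into the constant
   pool holding 1.5 comes back as the CONST_DOUBLE for 1.5; anything that
   is not such a constant pool reference is returned unchanged.  */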
211 rtx
212 avoid_constant_pool_reference (rtx x)
213 {
214 rtx c, tmp, addr;
215 machine_mode cmode;
216 HOST_WIDE_INT offset = 0;
217
218 switch (GET_CODE (x))
219 {
220 case MEM:
221 break;
222
223 case FLOAT_EXTEND:
224 /* Handle float extensions of constant pool references. */
225 tmp = XEXP (x, 0);
226 c = avoid_constant_pool_reference (tmp);
227 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
228 {
229 REAL_VALUE_TYPE d;
230
231 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
232 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
233 }
234 return x;
235
236 default:
237 return x;
238 }
239
240 if (GET_MODE (x) == BLKmode)
241 return x;
242
243 addr = XEXP (x, 0);
244
245 /* Call target hook to avoid the effects of -fpic etc.... */
246 addr = targetm.delegitimize_address (addr);
247
248 /* Split the address into a base and integer offset. */
249 if (GET_CODE (addr) == CONST
250 && GET_CODE (XEXP (addr, 0)) == PLUS
251 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
252 {
253 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
254 addr = XEXP (XEXP (addr, 0), 0);
255 }
256
257 if (GET_CODE (addr) == LO_SUM)
258 addr = XEXP (addr, 1);
259
260 /* If this is a constant pool reference, we can turn it into its
261 constant and hope that simplifications happen. */
262 if (GET_CODE (addr) == SYMBOL_REF
263 && CONSTANT_POOL_ADDRESS_P (addr))
264 {
265 c = get_pool_constant (addr);
266 cmode = get_pool_mode (addr);
267
268 /* If we're accessing the constant in a different mode than it was
269 originally stored, attempt to fix that up via subreg simplifications.
270 If that fails we have no choice but to return the original memory. */
271 if ((offset != 0 || cmode != GET_MODE (x))
272 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
273 {
274 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
275 if (tem && CONSTANT_P (tem))
276 return tem;
277 }
278 else
279 return c;
280 }
281
282 return x;
283 }
284 \f
285 /* Simplify a MEM based on its attributes. This is the default
286 delegitimize_address target hook, and it's recommended that every
287 overrider call it. */
288
289 rtx
290 delegitimize_mem_from_attrs (rtx x)
291 {
292 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
293 use their base addresses as equivalent. */
294 if (MEM_P (x)
295 && MEM_EXPR (x)
296 && MEM_OFFSET_KNOWN_P (x))
297 {
298 tree decl = MEM_EXPR (x);
299 machine_mode mode = GET_MODE (x);
300 HOST_WIDE_INT offset = 0;
301
302 switch (TREE_CODE (decl))
303 {
304 default:
305 decl = NULL;
306 break;
307
308 case VAR_DECL:
309 break;
310
311 case ARRAY_REF:
312 case ARRAY_RANGE_REF:
313 case COMPONENT_REF:
314 case BIT_FIELD_REF:
315 case REALPART_EXPR:
316 case IMAGPART_EXPR:
317 case VIEW_CONVERT_EXPR:
318 {
319 HOST_WIDE_INT bitsize, bitpos;
320 tree toffset;
321 int unsignedp, volatilep = 0;
322
323 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
324 &mode, &unsignedp, &volatilep, false);
325 if (bitsize != GET_MODE_BITSIZE (mode)
326 || (bitpos % BITS_PER_UNIT)
327 || (toffset && !tree_fits_shwi_p (toffset)))
328 decl = NULL;
329 else
330 {
331 offset += bitpos / BITS_PER_UNIT;
332 if (toffset)
333 offset += tree_to_shwi (toffset);
334 }
335 break;
336 }
337 }
338
339 if (decl
340 && mode == GET_MODE (x)
341 && TREE_CODE (decl) == VAR_DECL
342 && (TREE_STATIC (decl)
343 || DECL_THREAD_LOCAL_P (decl))
344 && DECL_RTL_SET_P (decl)
345 && MEM_P (DECL_RTL (decl)))
346 {
347 rtx newx;
348
349 offset += MEM_OFFSET (x);
350
351 newx = DECL_RTL (decl);
352
353 if (MEM_P (newx))
354 {
355 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
356
357 /* Avoid creating a new MEM needlessly if we already had
358 the same address. We do if there's no OFFSET and the
359 old address X is identical to NEWX, or if X is of the
360 form (plus NEWX OFFSET), or the NEWX is of the form
361 (plus Y (const_int Z)) and X is that with the offset
362 added: (plus Y (const_int Z+OFFSET)). */
363 if (!((offset == 0
364 || (GET_CODE (o) == PLUS
365 && GET_CODE (XEXP (o, 1)) == CONST_INT
366 && (offset == INTVAL (XEXP (o, 1))
367 || (GET_CODE (n) == PLUS
368 && GET_CODE (XEXP (n, 1)) == CONST_INT
369 && (INTVAL (XEXP (n, 1)) + offset
370 == INTVAL (XEXP (o, 1)))
371 && (n = XEXP (n, 0))))
372 && (o = XEXP (o, 0))))
373 && rtx_equal_p (o, n)))
374 x = adjust_address_nv (newx, mode, offset);
375 }
376 else if (GET_MODE (x) == GET_MODE (newx)
377 && offset == 0)
378 x = newx;
379 }
380 }
381
382 return x;
383 }
384 \f
385 /* Make a unary operation by first seeing if it folds and otherwise making
386 the specified operation. */
387
388 rtx
389 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
390 machine_mode op_mode)
391 {
392 rtx tem;
393
394 /* If this simplifies, use it. */
395 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
396 return tem;
397
398 return gen_rtx_fmt_e (code, mode, op);
399 }
400
401 /* Likewise for ternary operations. */
402
403 rtx
404 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
405 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
406 {
407 rtx tem;
408
409 /* If this simplifies, use it. */
410 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
411 op0, op1, op2)))
412 return tem;
413
414 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
415 }
416
417 /* Likewise, for relational operations.
418    CMP_MODE specifies the mode in which the comparison is done.  */
419
420 rtx
421 simplify_gen_relational (enum rtx_code code, machine_mode mode,
422 machine_mode cmp_mode, rtx op0, rtx op1)
423 {
424 rtx tem;
425
426 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
427 op0, op1)))
428 return tem;
429
430 return gen_rtx_fmt_ee (code, mode, op0, op1);
431 }
432 \f
433 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
434 and simplify the result. If FN is non-NULL, call this callback on each
435 X; if it returns non-NULL, replace X with its return value and simplify the
436 result. */
437
438 rtx
439 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
440 rtx (*fn) (rtx, const_rtx, void *), void *data)
441 {
442 enum rtx_code code = GET_CODE (x);
443 machine_mode mode = GET_MODE (x);
444 machine_mode op_mode;
445 const char *fmt;
446 rtx op0, op1, op2, newx, op;
447 rtvec vec, newvec;
448 int i, j;
449
450 if (__builtin_expect (fn != NULL, 0))
451 {
452 newx = fn (x, old_rtx, data);
453 if (newx)
454 return newx;
455 }
456 else if (rtx_equal_p (x, old_rtx))
457 return copy_rtx ((rtx) data);
458
459 switch (GET_RTX_CLASS (code))
460 {
461 case RTX_UNARY:
462 op0 = XEXP (x, 0);
463 op_mode = GET_MODE (op0);
464 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
465 if (op0 == XEXP (x, 0))
466 return x;
467 return simplify_gen_unary (code, mode, op0, op_mode);
468
469 case RTX_BIN_ARITH:
470 case RTX_COMM_ARITH:
471 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_binary (code, mode, op0, op1);
476
477 case RTX_COMPARE:
478 case RTX_COMM_COMPARE:
479 op0 = XEXP (x, 0);
480 op1 = XEXP (x, 1);
481 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
482 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
483 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
485 return x;
486 return simplify_gen_relational (code, mode, op_mode, op0, op1);
487
488 case RTX_TERNARY:
489 case RTX_BITFIELD_OPS:
490 op0 = XEXP (x, 0);
491 op_mode = GET_MODE (op0);
492 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
493 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
494 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
495 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
496 return x;
497 if (op_mode == VOIDmode)
498 op_mode = GET_MODE (op0);
499 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
500
501 case RTX_EXTRA:
502 if (code == SUBREG)
503 {
504 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
505 if (op0 == SUBREG_REG (x))
506 return x;
507 op0 = simplify_gen_subreg (GET_MODE (x), op0,
508 GET_MODE (SUBREG_REG (x)),
509 SUBREG_BYTE (x));
510 return op0 ? op0 : x;
511 }
512 break;
513
514 case RTX_OBJ:
515 if (code == MEM)
516 {
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 if (op0 == XEXP (x, 0))
519 return x;
520 return replace_equiv_address_nv (x, op0);
521 }
522 else if (code == LO_SUM)
523 {
524 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
525 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
526
527 /* (lo_sum (high x) x) -> x */
528 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
529 return op1;
530
531 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
532 return x;
533 return gen_rtx_LO_SUM (mode, op0, op1);
534 }
535 break;
536
537 default:
538 break;
539 }
540
541 newx = x;
542 fmt = GET_RTX_FORMAT (code);
543 for (i = 0; fmt[i]; i++)
544 switch (fmt[i])
545 {
546 case 'E':
547 vec = XVEC (x, i);
548 newvec = XVEC (newx, i);
549 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 {
551 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
552 old_rtx, fn, data);
553 if (op != RTVEC_ELT (vec, j))
554 {
555 if (newvec == vec)
556 {
557 newvec = shallow_copy_rtvec (vec);
558 if (x == newx)
559 newx = shallow_copy_rtx (x);
560 XVEC (newx, i) = newvec;
561 }
562 RTVEC_ELT (newvec, j) = op;
563 }
564 }
565 break;
566
567 case 'e':
568 if (XEXP (x, i))
569 {
570 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
571 if (op != XEXP (x, i))
572 {
573 if (x == newx)
574 newx = shallow_copy_rtx (x);
575 XEXP (newx, i) = op;
576 }
577 }
578 break;
579 }
580 return newx;
581 }
582
583 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
584 resulting RTX. Return a new RTX which is as simplified as possible. */
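/* Illustrative use: replacing A with const0_rtx in (plus:SI A B) yields B,
   because the rebuilt (plus:SI 0 B) is simplified on the way back up.  */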
585
586 rtx
587 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 {
589 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
590 }
591 \f
592 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
593 Only handle cases where the truncated value is inherently an rvalue.
594
595 RTL provides two ways of truncating a value:
596
597 1. a lowpart subreg. This form is only a truncation when both
598 the outer and inner modes (here MODE and OP_MODE respectively)
599 are scalar integers, and only then when the subreg is used as
600 an rvalue.
601
602 It is only valid to form such truncating subregs if the
603 truncation requires no action by the target. The onus for
604 proving this is on the creator of the subreg -- e.g. the
605 caller to simplify_subreg or simplify_gen_subreg -- and typically
606 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607
608 2. a TRUNCATE. This form handles both scalar and compound integers.
609
610 The first form is preferred where valid. However, the TRUNCATE
611 handling in simplify_unary_operation turns the second form into the
612 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
613 so it is generally safe to form rvalue truncations using:
614
615 simplify_gen_unary (TRUNCATE, ...)
616
617 and leave simplify_unary_operation to work out which representation
618 should be used.
619
620 Because of the proof requirements on (1), simplify_truncation must
621 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
622 regardless of whether the outer truncation came from a SUBREG or a
623 TRUNCATE. For example, if the caller has proven that an SImode
624 truncation of:
625
626 (and:DI X Y)
627
628 is a no-op and can be represented as a subreg, it does not follow
629 that SImode truncations of X and Y are also no-ops. On a target
630 like 64-bit MIPS that requires SImode values to be stored in
631 sign-extended form, an SImode truncation of:
632
633 (and:DI (reg:DI X) (const_int 63))
634
635 is trivially a no-op because only the lower 6 bits can be set.
636 However, X is still an arbitrary 64-bit number and so we cannot
637 assume that truncating it too is a no-op. */
638
639 static rtx
640 simplify_truncation (machine_mode mode, rtx op,
641 machine_mode op_mode)
642 {
643 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
644 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
645 gcc_assert (precision <= op_precision);
646
647 /* Optimize truncations of zero and sign extended values. */
648 if (GET_CODE (op) == ZERO_EXTEND
649 || GET_CODE (op) == SIGN_EXTEND)
650 {
651 /* There are three possibilities. If MODE is the same as the
652 origmode, we can omit both the extension and the subreg.
653 If MODE is not larger than the origmode, we can apply the
654 truncation without the extension. Finally, if the outermode
655 is larger than the origmode, we can just extend to the appropriate
656 mode. */
657 machine_mode origmode = GET_MODE (XEXP (op, 0));
658 if (mode == origmode)
659 return XEXP (op, 0);
660 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
661 return simplify_gen_unary (TRUNCATE, mode,
662 XEXP (op, 0), origmode);
663 else
664 return simplify_gen_unary (GET_CODE (op), mode,
665 XEXP (op, 0), origmode);
666 }
667
668 /* If the machine can perform operations in the truncated mode, distribute
669 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
670 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
671 if (1
672 #ifdef WORD_REGISTER_OPERATIONS
673 && precision >= BITS_PER_WORD
674 #endif
675 && (GET_CODE (op) == PLUS
676 || GET_CODE (op) == MINUS
677 || GET_CODE (op) == MULT))
678 {
679 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
680 if (op0)
681 {
682 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
683 if (op1)
684 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
685 }
686 }
687
688 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
689    (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op) == LSHIFTRT
692 || GET_CODE (op) == ASHIFTRT)
693 /* Ensure that OP_MODE is at least twice as wide as MODE
694 to avoid the possibility that an outer LSHIFTRT shifts by more
695 than the sign extension's sign_bit_copies and introduces zeros
696 into the high bits of the result. */
697 && 2 * precision <= op_precision
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (ASHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704
705 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
706    (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if ((GET_CODE (op) == LSHIFTRT
709 || GET_CODE (op) == ASHIFTRT)
710 && CONST_INT_P (XEXP (op, 1))
711 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (LSHIFTRT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716
717 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
718    (ashift:QI (x:QI) C), where C is a suitable small constant and
719 the outer subreg is effectively a truncation to the original mode. */
720 if (GET_CODE (op) == ASHIFT
721 && CONST_INT_P (XEXP (op, 1))
722 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
723 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
725 && UINTVAL (XEXP (op, 1)) < precision)
726 return simplify_gen_binary (ASHIFT, mode,
727 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728
729 /* Recognize a word extraction from a multi-word subreg. */
730 if ((GET_CODE (op) == LSHIFTRT
731 || GET_CODE (op) == ASHIFTRT)
732 && SCALAR_INT_MODE_P (mode)
733 && SCALAR_INT_MODE_P (op_mode)
734 && precision >= BITS_PER_WORD
735 && 2 * precision <= op_precision
736 && CONST_INT_P (XEXP (op, 1))
737 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
738 && UINTVAL (XEXP (op, 1)) < op_precision)
739 {
740 int byte = subreg_lowpart_offset (mode, op_mode);
741 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
742 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
743 (WORDS_BIG_ENDIAN
744 ? byte - shifted_bytes
745 : byte + shifted_bytes));
746 }
747
748 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
749 and try replacing the TRUNCATE and shift with it. Don't do this
750 if the MEM has a mode-dependent address. */
751 if ((GET_CODE (op) == LSHIFTRT
752 || GET_CODE (op) == ASHIFTRT)
753 && SCALAR_INT_MODE_P (op_mode)
754 && MEM_P (XEXP (op, 0))
755 && CONST_INT_P (XEXP (op, 1))
756 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
757 && INTVAL (XEXP (op, 1)) > 0
758 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
759 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
760 MEM_ADDR_SPACE (XEXP (op, 0)))
761 && ! MEM_VOLATILE_P (XEXP (op, 0))
762 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
763 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
764 {
765 int byte = subreg_lowpart_offset (mode, op_mode);
766 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
767 return adjust_address_nv (XEXP (op, 0), mode,
768 (WORDS_BIG_ENDIAN
769 ? byte - shifted_bytes
770 : byte + shifted_bytes));
771 }
772
773 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
774 (OP:SI foo:SI) if OP is NEG or ABS. */
775 if ((GET_CODE (op) == ABS
776 || GET_CODE (op) == NEG)
777 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
778 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
779 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
780 return simplify_gen_unary (GET_CODE (op), mode,
781 XEXP (XEXP (op, 0), 0), mode);
782
783 /* (truncate:A (subreg:B (truncate:C X) 0)) is
784 (truncate:A X). */
785 if (GET_CODE (op) == SUBREG
786 && SCALAR_INT_MODE_P (mode)
787 && SCALAR_INT_MODE_P (op_mode)
788 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
789 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
790 && subreg_lowpart_p (op))
791 {
792 rtx inner = XEXP (SUBREG_REG (op), 0);
793 if (GET_MODE_PRECISION (mode)
794 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
795 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
796 else
797 /* If subreg above is paradoxical and C is narrower
798 than A, return (subreg:A (truncate:C X) 0). */
799 return simplify_gen_subreg (mode, SUBREG_REG (op),
800 GET_MODE (SUBREG_REG (op)), 0);
801 }
802
803 /* (truncate:A (truncate:B X)) is (truncate:A X). */
804 if (GET_CODE (op) == TRUNCATE)
805 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
806 GET_MODE (XEXP (op, 0)));
807
808 return NULL_RTX;
809 }
810 \f
811 /* Try to simplify a unary operation CODE whose output mode is to be
812 MODE with input operand OP whose mode was originally OP_MODE.
813 Return zero if no simplification can be made. */
814 rtx
815 simplify_unary_operation (enum rtx_code code, machine_mode mode,
816 rtx op, machine_mode op_mode)
817 {
818 rtx trueop, tem;
819
820 trueop = avoid_constant_pool_reference (op);
821
822 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
823 if (tem)
824 return tem;
825
826 return simplify_unary_operation_1 (code, mode, op);
827 }
828
829 /* Perform some simplifications we can do even if the operands
830 aren't constant. */
831 static rtx
832 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
833 {
834 enum rtx_code reversed;
835 rtx temp;
836
837 switch (code)
838 {
839 case NOT:
840 /* (not (not X)) == X. */
841 if (GET_CODE (op) == NOT)
842 return XEXP (op, 0);
843
844 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
845 comparison is all ones. */
846 if (COMPARISON_P (op)
847 && (mode == BImode || STORE_FLAG_VALUE == -1)
848 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
849 return simplify_gen_relational (reversed, mode, VOIDmode,
850 XEXP (op, 0), XEXP (op, 1));
851
852 /* (not (plus X -1)) can become (neg X). */
853 if (GET_CODE (op) == PLUS
854 && XEXP (op, 1) == constm1_rtx)
855 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
856
857 /* Similarly, (not (neg X)) is (plus X -1). */
858 if (GET_CODE (op) == NEG)
859 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
860 CONSTM1_RTX (mode));
861
862 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
863 if (GET_CODE (op) == XOR
864 && CONST_INT_P (XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
868
869 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
870 if (GET_CODE (op) == PLUS
871 && CONST_INT_P (XEXP (op, 1))
872 && mode_signbit_p (mode, XEXP (op, 1))
873 && (temp = simplify_unary_operation (NOT, mode,
874 XEXP (op, 1), mode)) != 0)
875 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
876
877
878 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
879 operands other than 1, but that is not valid. We could do a
880 similar simplification for (not (lshiftrt C X)) where C is
881 just the sign bit, but this doesn't seem common enough to
882 bother with. */
883 if (GET_CODE (op) == ASHIFT
884 && XEXP (op, 0) == const1_rtx)
885 {
886 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
887 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
888 }
889
890 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
891 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
892 so we can perform the above simplification. */
893 if (STORE_FLAG_VALUE == -1
894 && GET_CODE (op) == ASHIFTRT
895 && CONST_INT_P (XEXP (op, 1))
896 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
897 return simplify_gen_relational (GE, mode, VOIDmode,
898 XEXP (op, 0), const0_rtx);
899
900
901 if (GET_CODE (op) == SUBREG
902 && subreg_lowpart_p (op)
903 && (GET_MODE_SIZE (GET_MODE (op))
904 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
905 && GET_CODE (SUBREG_REG (op)) == ASHIFT
906 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
907 {
908 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
909 rtx x;
910
911 x = gen_rtx_ROTATE (inner_mode,
912 simplify_gen_unary (NOT, inner_mode, const1_rtx,
913 inner_mode),
914 XEXP (SUBREG_REG (op), 1));
915 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
916 if (temp)
917 return temp;
918 }
919
920 /* Apply De Morgan's laws to reduce the number of patterns for machines
921 with negating logical insns (and-not, nand, etc.). If result has
922 only one NOT, put it first, since that is how the patterns are
923 coded. */
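      /* E.g. (not (and X Y)) becomes (ior (not X) (not Y)), and
         (not (ior X (not Y))) becomes (and (not X) Y).  */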
924 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
925 {
926 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
927 machine_mode op_mode;
928
929 op_mode = GET_MODE (in1);
930 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
931
932 op_mode = GET_MODE (in2);
933 if (op_mode == VOIDmode)
934 op_mode = mode;
935 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
936
937 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
938 {
939 rtx tem = in2;
940 in2 = in1; in1 = tem;
941 }
942
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
945 }
946
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
949 {
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
952 }
953 break;
954
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
959
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op) == PLUS
962 && XEXP (op, 1) == const1_rtx)
963 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
964
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op) == NOT)
967 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
968 CONST1_RTX (mode));
969
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
979
980 if (GET_CODE (op) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
983 {
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (CONST_SCALAR_INT_P (XEXP (op, 1))
986 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
987 {
988 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
989 if (temp)
990 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
991 }
992
993 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
994 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
995 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
996 }
997
998 /* (neg (mult A B)) becomes (mult A (neg B)).
999 This works even for floating-point values. */
1000 if (GET_CODE (op) == MULT
1001 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1002 {
1003 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1004 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1005 }
1006
1007 /* NEG commutes with ASHIFT since it is multiplication. Only do
1008 this if we can then eliminate the NEG (e.g., if the operand
1009 is a constant). */
1010 if (GET_CODE (op) == ASHIFT)
1011 {
1012 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1015 }
1016
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
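      /* This is correct because (ashiftrt X C) is then either 0 or -1,
         so its negation is 0 or 1, which is exactly (lshiftrt X C).  */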
1019 if (GET_CODE (op) == ASHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1024
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && CONST_INT_P (XEXP (op, 1))
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1032
1033 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1034 if (GET_CODE (op) == XOR
1035 && XEXP (op, 1) == const1_rtx
1036 && nonzero_bits (XEXP (op, 0), mode) == 1)
1037 return plus_constant (mode, XEXP (op, 0), -1);
1038
1039 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1040 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1041 if (GET_CODE (op) == LT
1042 && XEXP (op, 1) == const0_rtx
1043 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1044 {
1045 machine_mode inner = GET_MODE (XEXP (op, 0));
1046 int isize = GET_MODE_PRECISION (inner);
1047 if (STORE_FLAG_VALUE == 1)
1048 {
1049 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1050 GEN_INT (isize - 1));
1051 if (mode == inner)
1052 return temp;
1053 if (GET_MODE_PRECISION (mode) > isize)
1054 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1055 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1056 }
1057 else if (STORE_FLAG_VALUE == -1)
1058 {
1059 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1060 GEN_INT (isize - 1));
1061 if (mode == inner)
1062 return temp;
1063 if (GET_MODE_PRECISION (mode) > isize)
1064 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1065 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1066 }
1067 }
1068 break;
1069
1070 case TRUNCATE:
1071 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1072 with the umulXi3_highpart patterns. */
1073 if (GET_CODE (op) == LSHIFTRT
1074 && GET_CODE (XEXP (op, 0)) == MULT)
1075 break;
1076
1077 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1078 {
1079 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1080 {
1081 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1082 if (temp)
1083 return temp;
1084 }
1085 /* We can't handle truncation to a partial integer mode here
1086 because we don't know the real bitsize of the partial
1087 integer mode. */
1088 break;
1089 }
1090
1091 if (GET_MODE (op) != VOIDmode)
1092 {
1093 temp = simplify_truncation (mode, op, GET_MODE (op));
1094 if (temp)
1095 return temp;
1096 }
1097
1098 /* If we know that the value is already truncated, we can
1099 replace the TRUNCATE with a SUBREG. */
1100 if (GET_MODE_NUNITS (mode) == 1
1101 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1102 || truncated_to_mode (mode, op)))
1103 {
1104 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 if (temp)
1106 return temp;
1107 }
1108
1109 /* A truncate of a comparison can be replaced with a subreg if
1110 STORE_FLAG_VALUE permits. This is like the previous test,
1111 but it works even if the comparison is done in a mode larger
1112 than HOST_BITS_PER_WIDE_INT. */
1113 if (HWI_COMPUTABLE_MODE_P (mode)
1114 && COMPARISON_P (op)
1115 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1116 {
1117 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1118 if (temp)
1119 return temp;
1120 }
1121
1122 /* A truncate of a memory is just loading the low part of the memory
1123 if we are not changing the meaning of the address. */
1124 if (GET_CODE (op) == MEM
1125 && !VECTOR_MODE_P (mode)
1126 && !MEM_VOLATILE_P (op)
1127 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1128 {
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1132 }
1133
1134 break;
1135
1136 case FLOAT_TRUNCATE:
1137 if (DECIMAL_FLOAT_MODE_P (mode))
1138 break;
1139
1140 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1141 if (GET_CODE (op) == FLOAT_EXTEND
1142 && GET_MODE (XEXP (op, 0)) == mode)
1143 return XEXP (op, 0);
1144
1145 /* (float_truncate:SF (float_truncate:DF foo:XF))
1146 = (float_truncate:SF foo:XF).
1147 This may eliminate double rounding, so it is unsafe.
1148
1149 (float_truncate:SF (float_extend:XF foo:DF))
1150 = (float_truncate:SF foo:DF).
1151
1152 (float_truncate:DF (float_extend:XF foo:SF))
1153 = (float_extend:DF foo:SF). */
1154 if ((GET_CODE (op) == FLOAT_TRUNCATE
1155 && flag_unsafe_math_optimizations)
1156 || GET_CODE (op) == FLOAT_EXTEND)
1157 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1158 0)))
1159 > GET_MODE_SIZE (mode)
1160 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1161 mode,
1162 XEXP (op, 0), mode);
1163
1164 /* (float_truncate (float x)) is (float x) */
1165 if (GET_CODE (op) == FLOAT
1166 && (flag_unsafe_math_optimizations
1167 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1168 && ((unsigned)significand_size (GET_MODE (op))
1169 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1170 - num_sign_bit_copies (XEXP (op, 0),
1171 GET_MODE (XEXP (op, 0))))))))
1172 return simplify_gen_unary (FLOAT, mode,
1173 XEXP (op, 0),
1174 GET_MODE (XEXP (op, 0)));
1175
1176 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1177 (OP:SF foo:SF) if OP is NEG or ABS. */
1178 if ((GET_CODE (op) == ABS
1179 || GET_CODE (op) == NEG)
1180 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1181 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1182 return simplify_gen_unary (GET_CODE (op), mode,
1183 XEXP (XEXP (op, 0), 0), mode);
1184
1185 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1186 is (float_truncate:SF x). */
1187 if (GET_CODE (op) == SUBREG
1188 && subreg_lowpart_p (op)
1189 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1190 return SUBREG_REG (op);
1191 break;
1192
1193 case FLOAT_EXTEND:
1194 if (DECIMAL_FLOAT_MODE_P (mode))
1195 break;
1196
1197 /* (float_extend (float_extend x)) is (float_extend x)
1198
1199 (float_extend (float x)) is (float x) assuming that double
1200 rounding can't happen.
1201 */
1202 if (GET_CODE (op) == FLOAT_EXTEND
1203 || (GET_CODE (op) == FLOAT
1204 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1205 && ((unsigned)significand_size (GET_MODE (op))
1206 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1207 - num_sign_bit_copies (XEXP (op, 0),
1208 GET_MODE (XEXP (op, 0)))))))
1209 return simplify_gen_unary (GET_CODE (op), mode,
1210 XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1212
1213 break;
1214
1215 case ABS:
1216 /* (abs (neg <foo>)) -> (abs <foo>) */
1217 if (GET_CODE (op) == NEG)
1218 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1220
1221 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1222 do nothing. */
1223 if (GET_MODE (op) == VOIDmode)
1224 break;
1225
1226 /* If operand is something known to be positive, ignore the ABS. */
1227 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1228 || val_signbit_known_clear_p (GET_MODE (op),
1229 nonzero_bits (op, GET_MODE (op))))
1230 return op;
1231
1232 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1233 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1234 return gen_rtx_NEG (mode, op);
1235
1236 break;
1237
1238 case FFS:
1239 /* (ffs (*_extend <X>)) = (ffs <X>) */
1240 if (GET_CODE (op) == SIGN_EXTEND
1241 || GET_CODE (op) == ZERO_EXTEND)
1242 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1244 break;
1245
1246 case POPCOUNT:
1247 switch (GET_CODE (op))
1248 {
1249 case BSWAP:
1250 case ZERO_EXTEND:
1251 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1252 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1254
1255 case ROTATE:
1256 case ROTATERT:
1257 /* Rotations don't affect popcount. */
1258 if (!side_effects_p (XEXP (op, 1)))
1259 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1260 GET_MODE (XEXP (op, 0)));
1261 break;
1262
1263 default:
1264 break;
1265 }
1266 break;
1267
1268 case PARITY:
1269 switch (GET_CODE (op))
1270 {
1271 case NOT:
1272 case BSWAP:
1273 case ZERO_EXTEND:
1274 case SIGN_EXTEND:
1275 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1277
1278 case ROTATE:
1279 case ROTATERT:
1280 /* Rotations don't affect parity. */
1281 if (!side_effects_p (XEXP (op, 1)))
1282 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1285
1286 default:
1287 break;
1288 }
1289 break;
1290
1291 case BSWAP:
1292 /* (bswap (bswap x)) -> x. */
1293 if (GET_CODE (op) == BSWAP)
1294 return XEXP (op, 0);
1295 break;
1296
1297 case FLOAT:
1298 /* (float (sign_extend <X>)) = (float <X>). */
1299 if (GET_CODE (op) == SIGN_EXTEND)
1300 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1301 GET_MODE (XEXP (op, 0)));
1302 break;
1303
1304 case SIGN_EXTEND:
1305 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1306 becomes just the MINUS if its mode is MODE. This allows
1307 folding switch statements on machines using casesi (such as
1308 the VAX). */
1309 if (GET_CODE (op) == TRUNCATE
1310 && GET_MODE (XEXP (op, 0)) == mode
1311 && GET_CODE (XEXP (op, 0)) == MINUS
1312 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1313 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1314 return XEXP (op, 0);
1315
1316 /* Extending a widening multiplication should be canonicalized to
1317 a wider widening multiplication. */
1318 if (GET_CODE (op) == MULT)
1319 {
1320 rtx lhs = XEXP (op, 0);
1321 rtx rhs = XEXP (op, 1);
1322 enum rtx_code lcode = GET_CODE (lhs);
1323 enum rtx_code rcode = GET_CODE (rhs);
1324
1325 /* Widening multiplies usually extend both operands, but sometimes
1326 they use a shift to extract a portion of a register. */
1327 if ((lcode == SIGN_EXTEND
1328 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1329 && (rcode == SIGN_EXTEND
1330 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1331 {
1332 machine_mode lmode = GET_MODE (lhs);
1333 machine_mode rmode = GET_MODE (rhs);
1334 int bits;
1335
1336 if (lcode == ASHIFTRT)
1337 /* Number of bits not shifted off the end. */
1338 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1339 else /* lcode == SIGN_EXTEND */
1340 /* Size of inner mode. */
1341 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1342
1343 if (rcode == ASHIFTRT)
1344 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1345 else /* rcode == SIGN_EXTEND */
1346 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1347
1348 /* We can only widen multiplies if the result is mathematically
1349    equivalent, i.e. if overflow was impossible. */
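	      /* For instance, (mult:SI (sign_extend:SI x:HI) (sign_extend:SI y:HI))
	         needs at most 16 + 16 = 32 bits, so when extending to DImode it can
	         become (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */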
1350 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1351 return simplify_gen_binary
1352 (MULT, mode,
1353 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1354 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1355 }
1356 }
1357
1358 /* Check for a sign extension of a subreg of a promoted
1359 variable, where the promotion is sign-extended, and the
1360 target mode is the same as the variable's promotion. */
1361 if (GET_CODE (op) == SUBREG
1362 && SUBREG_PROMOTED_VAR_P (op)
1363 && SUBREG_PROMOTED_SIGNED_P (op)
1364 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1365 {
1366 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1367 if (temp)
1368 return temp;
1369 }
1370
1371 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1372 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1373 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1374 {
1375 gcc_assert (GET_MODE_PRECISION (mode)
1376 > GET_MODE_PRECISION (GET_MODE (op)));
1377 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1379 }
1380
1381 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1382 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1383 GET_MODE_BITSIZE (N) - I bits.
1384 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1385 is similarly (zero_extend:M (subreg:O <X>)). */
1386 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1387 && GET_CODE (XEXP (op, 0)) == ASHIFT
1388 && CONST_INT_P (XEXP (op, 1))
1389 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1390 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1391 {
1392 machine_mode tmode
1393 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1394 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1395 gcc_assert (GET_MODE_BITSIZE (mode)
1396 > GET_MODE_BITSIZE (GET_MODE (op)));
1397 if (tmode != BLKmode)
1398 {
1399 rtx inner =
1400 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1401 if (inner)
1402 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1403 ? SIGN_EXTEND : ZERO_EXTEND,
1404 mode, inner, tmode);
1405 }
1406 }
1407
1408 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1409 /* As we do not know which address space the pointer is referring to,
1410 we can do this only if the target does not support different pointer
1411 or address modes depending on the address space. */
1412 if (target_default_pointer_address_modes_p ()
1413 && ! POINTERS_EXTEND_UNSIGNED
1414 && mode == Pmode && GET_MODE (op) == ptr_mode
1415 && (CONSTANT_P (op)
1416 || (GET_CODE (op) == SUBREG
1417 && REG_P (SUBREG_REG (op))
1418 && REG_POINTER (SUBREG_REG (op))
1419 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1420 return convert_memory_address (Pmode, op);
1421 #endif
1422 break;
1423
1424 case ZERO_EXTEND:
1425 /* Check for a zero extension of a subreg of a promoted
1426 variable, where the promotion is zero-extended, and the
1427 target mode is the same as the variable's promotion. */
1428 if (GET_CODE (op) == SUBREG
1429 && SUBREG_PROMOTED_VAR_P (op)
1430 && SUBREG_PROMOTED_UNSIGNED_P (op)
1431 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1432 {
1433 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1434 if (temp)
1435 return temp;
1436 }
1437
1438 /* Extending a widening multiplication should be canonicalized to
1439 a wider widening multiplication. */
1440 if (GET_CODE (op) == MULT)
1441 {
1442 rtx lhs = XEXP (op, 0);
1443 rtx rhs = XEXP (op, 1);
1444 enum rtx_code lcode = GET_CODE (lhs);
1445 enum rtx_code rcode = GET_CODE (rhs);
1446
1447 /* Widening multiplies usually extend both operands, but sometimes
1448 they use a shift to extract a portion of a register. */
1449 if ((lcode == ZERO_EXTEND
1450 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1451 && (rcode == ZERO_EXTEND
1452 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1453 {
1454 machine_mode lmode = GET_MODE (lhs);
1455 machine_mode rmode = GET_MODE (rhs);
1456 int bits;
1457
1458 if (lcode == LSHIFTRT)
1459 /* Number of bits not shifted off the end. */
1460 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1461 else /* lcode == ZERO_EXTEND */
1462 /* Size of inner mode. */
1463 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1464
1465 if (rcode == LSHIFTRT)
1466 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1467 else /* rcode == ZERO_EXTEND */
1468 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1469
1470 /* We can only widen multiplies if the result is mathematically
1471    equivalent, i.e. if overflow was impossible. */
1472 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1473 return simplify_gen_binary
1474 (MULT, mode,
1475 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1476 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1477 }
1478 }
1479
1480 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op) == ZERO_EXTEND)
1482 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1483 GET_MODE (XEXP (op, 0)));
1484
1485 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1487 GET_MODE_PRECISION (N) - I bits. */
1488 if (GET_CODE (op) == LSHIFTRT
1489 && GET_CODE (XEXP (op, 0)) == ASHIFT
1490 && CONST_INT_P (XEXP (op, 1))
1491 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1492 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1493 {
1494 machine_mode tmode
1495 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1496 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1497 if (tmode != BLKmode)
1498 {
1499 rtx inner =
1500 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1501 if (inner)
1502 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1503 }
1504 }
1505
1506 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1507 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1508 of mode N. E.g.
1509 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1510 (and:SI (reg:SI) (const_int 63)). */
1511 if (GET_CODE (op) == SUBREG
1512 && GET_MODE_PRECISION (GET_MODE (op))
1513 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1514 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1515 <= HOST_BITS_PER_WIDE_INT
1516 && GET_MODE_PRECISION (mode)
1517 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1518 && subreg_lowpart_p (op)
1519 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1520 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1521 {
1522 if (GET_MODE_PRECISION (mode)
1523 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1524 return SUBREG_REG (op);
1525 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1526 GET_MODE (SUBREG_REG (op)));
1527 }
1528
1529 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && POINTERS_EXTEND_UNSIGNED > 0
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1541 return convert_memory_address (Pmode, op);
1542 #endif
1543 break;
1544
1545 default:
1546 break;
1547 }
1548
1549 return 0;
1550 }
1551
1552 /* Try to compute the value of a unary operation CODE whose output mode is to
1553 be MODE with input operand OP whose mode was originally OP_MODE.
1554 Return zero if the value cannot be computed. */
1555 rtx
1556 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1557 rtx op, machine_mode op_mode)
1558 {
1559 unsigned int width = GET_MODE_PRECISION (mode);
1560
1561 if (code == VEC_DUPLICATE)
1562 {
1563 gcc_assert (VECTOR_MODE_P (mode));
1564 if (GET_MODE (op) != VOIDmode)
1565 {
1566 if (!VECTOR_MODE_P (GET_MODE (op)))
1567 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1568 else
1569 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1570 (GET_MODE (op)));
1571 }
1572 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1573 || GET_CODE (op) == CONST_VECTOR)
1574 {
1575 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1576 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1577 rtvec v = rtvec_alloc (n_elts);
1578 unsigned int i;
1579
1580 if (GET_CODE (op) != CONST_VECTOR)
1581 for (i = 0; i < n_elts; i++)
1582 RTVEC_ELT (v, i) = op;
1583 else
1584 {
1585 machine_mode inmode = GET_MODE (op);
1586 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1587 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1588
1589 gcc_assert (in_n_elts < n_elts);
1590 gcc_assert ((n_elts % in_n_elts) == 0);
1591 for (i = 0; i < n_elts; i++)
1592 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1593 }
1594 return gen_rtx_CONST_VECTOR (mode, v);
1595 }
1596 }
1597
1598 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1599 {
1600 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1601 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1602 machine_mode opmode = GET_MODE (op);
1603 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1604 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1607
1608 gcc_assert (op_n_elts == n_elts);
1609 for (i = 0; i < n_elts; i++)
1610 {
1611 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1612 CONST_VECTOR_ELT (op, i),
1613 GET_MODE_INNER (opmode));
1614 if (!x)
1615 return 0;
1616 RTVEC_ELT (v, i) = x;
1617 }
1618 return gen_rtx_CONST_VECTOR (mode, v);
1619 }
1620
1621 /* The order of these tests is critical so that, for example, we don't
1622 check the wrong mode (input vs. output) for a conversion operation,
1623 such as FIX. At some point, this should be simplified. */
1624
1625 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1626 {
1627 REAL_VALUE_TYPE d;
1628
1629 if (op_mode == VOIDmode)
1630 {
1631 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1632    the bits of the constant are significant; this is a dangerous
1633    assumption, though, because CONST_INTs are often created and
1634    used with garbage in the bits outside of the precision of the
1635    implied mode of the const_int. */
1636 op_mode = MAX_MODE_INT;
1637 }
1638
1639 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1640 d = real_value_truncate (mode, d);
1641 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1642 }
1643 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1644 {
1645 REAL_VALUE_TYPE d;
1646
1647 if (op_mode == VOIDmode)
1648 {
1649 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1650    the bits of the constant are significant; this is a dangerous
1651    assumption, though, because CONST_INTs are often created and
1652    used with garbage in the bits outside of the precision of the
1653    implied mode of the const_int. */
1654 op_mode = MAX_MODE_INT;
1655 }
1656
1657 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1658 d = real_value_truncate (mode, d);
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1660 }
1661
1662 if (CONST_SCALAR_INT_P (op) && width > 0)
1663 {
1664 wide_int result;
1665 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1666 rtx_mode_t op0 = std::make_pair (op, imode);
1667 int int_value;
1668
1669 #if TARGET_SUPPORTS_WIDE_INT == 0
1670 /* This assert keeps the simplification from producing a result
1671    that cannot be represented in a CONST_DOUBLE, but a lot of
1672    upstream callers expect that this function never fails to
1673    simplify something, so if you added this check to the test
1674    above the code would just die later anyway.  If this assert
1675    fires, you need to make the port support wide int. */
1676 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1677 #endif
1678
1679 switch (code)
1680 {
1681 case NOT:
1682 result = wi::bit_not (op0);
1683 break;
1684
1685 case NEG:
1686 result = wi::neg (op0);
1687 break;
1688
1689 case ABS:
1690 result = wi::abs (op0);
1691 break;
1692
1693 case FFS:
1694 result = wi::shwi (wi::ffs (op0), mode);
1695 break;
1696
1697 case CLZ:
1698 if (wi::ne_p (op0, 0))
1699 int_value = wi::clz (op0);
1700 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1701 int_value = GET_MODE_PRECISION (mode);
1702 result = wi::shwi (int_value, mode);
1703 break;
1704
1705 case CLRSB:
1706 result = wi::shwi (wi::clrsb (op0), mode);
1707 break;
1708
1709 case CTZ:
1710 if (wi::ne_p (op0, 0))
1711 int_value = wi::ctz (op0);
1712 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1713 int_value = GET_MODE_PRECISION (mode);
1714 result = wi::shwi (int_value, mode);
1715 break;
1716
1717 case POPCOUNT:
1718 result = wi::shwi (wi::popcount (op0), mode);
1719 break;
1720
1721 case PARITY:
1722 result = wi::shwi (wi::parity (op0), mode);
1723 break;
1724
1725 case BSWAP:
1726 result = wide_int (op0).bswap ();
1727 break;
1728
1729 case TRUNCATE:
1730 case ZERO_EXTEND:
1731 result = wide_int::from (op0, width, UNSIGNED);
1732 break;
1733
1734 case SIGN_EXTEND:
1735 result = wide_int::from (op0, width, SIGNED);
1736 break;
1737
1738 case SQRT:
1739 default:
1740 return 0;
1741 }
1742
1743 return immed_wide_int_const (result, mode);
1744 }
1745
1746 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1747 && SCALAR_FLOAT_MODE_P (mode)
1748 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1749 {
1750 REAL_VALUE_TYPE d;
1751 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1752
1753 switch (code)
1754 {
1755 case SQRT:
1756 return 0;
1757 case ABS:
1758 d = real_value_abs (&d);
1759 break;
1760 case NEG:
1761 d = real_value_negate (&d);
1762 break;
1763 case FLOAT_TRUNCATE:
1764 d = real_value_truncate (mode, d);
1765 break;
1766 case FLOAT_EXTEND:
1767 	  /* All this does is change the mode, unless the mode class
1768 	     changes, in which case the value is really converted.  */
1769 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1770 real_convert (&d, mode, &d);
1771 break;
1772 case FIX:
1773 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1774 break;
1775 case NOT:
1776 {
1777 long tmp[4];
1778 int i;
1779
1780 real_to_target (tmp, &d, GET_MODE (op));
1781 for (i = 0; i < 4; i++)
1782 tmp[i] = ~tmp[i];
1783 real_from_target (&d, tmp, mode);
1784 break;
1785 }
1786 default:
1787 gcc_unreachable ();
1788 }
1789 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1790 }
1791 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1792 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1793 && GET_MODE_CLASS (mode) == MODE_INT
1794 && width > 0)
1795 {
1796 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1797 operators are intentionally left unspecified (to ease implementation
1798 by target backends), for consistency, this routine implements the
1799 same semantics for constant folding as used by the middle-end. */
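         For example, in SImode FIX of a NaN folds to 0, FIX of 1.0e30
         saturates to 0x7fffffff, and UNSIGNED_FIX of any negative value
         folds to 0, matching the saturating handling implemented below.  */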
1800
1801 /* This was formerly used only for non-IEEE float.
1802 eggert@twinsun.com says it is safe for IEEE also. */
1803 REAL_VALUE_TYPE x, t;
1804 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1805 wide_int wmax, wmin;
1806       /* FAIL is part of the interface to real_to_integer, but we
1807 	 check things before making the call, so it is never examined.  */
1808 bool fail;
1809
1810 switch (code)
1811 {
1812 case FIX:
1813 if (REAL_VALUE_ISNAN (x))
1814 return const0_rtx;
1815
1816 /* Test against the signed upper bound. */
1817 wmax = wi::max_value (width, SIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1821
1822 /* Test against the signed lower bound. */
1823 wmin = wi::min_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1825 if (REAL_VALUES_LESS (x, t))
1826 return immed_wide_int_const (wmin, mode);
1827
1828 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1829 break;
1830
1831 case UNSIGNED_FIX:
1832 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1833 return const0_rtx;
1834
1835 /* Test against the unsigned upper bound. */
1836 wmax = wi::max_value (width, UNSIGNED);
1837 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1838 if (REAL_VALUES_LESS (t, x))
1839 return immed_wide_int_const (wmax, mode);
1840
1841 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1842 mode);
1843 break;
1844
1845 default:
1846 gcc_unreachable ();
1847 }
1848 }
1849
1850 return NULL_RTX;
1851 }
1852 \f
1853 /* Subroutine of simplify_binary_operation to simplify a binary operation
1854 CODE that can commute with byte swapping, with result mode MODE and
1855 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1856 Return zero if no simplification or canonicalization is possible. */
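
   For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))): the constant operand is
   byte-swapped so that the BSWAP can be pulled outside the AND.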
1857
1858 static rtx
1859 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1860 rtx op0, rtx op1)
1861 {
1862 rtx tem;
1863
1864   /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped.  */
1865 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1866 {
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1868 simplify_gen_unary (BSWAP, mode, op1, mode));
1869 return simplify_gen_unary (BSWAP, mode, tem, mode);
1870 }
1871
1872 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1873 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1874 {
1875 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1876 return simplify_gen_unary (BSWAP, mode, tem, mode);
1877 }
1878
1879 return NULL_RTX;
1880 }
1881
1882 /* Subroutine of simplify_binary_operation to simplify a commutative,
1883 associative binary operation CODE with result mode MODE, operating
1884 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1885 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1886 canonicalization is possible. */
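
   For example, (plus (plus x (const_int 4)) y) is canonicalized here
   as (plus (plus x y) (const_int 4)), moving the constant outermost.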
1887
1888 static rtx
1889 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1890 rtx op0, rtx op1)
1891 {
1892 rtx tem;
1893
1894 /* Linearize the operator to the left. */
1895 if (GET_CODE (op1) == code)
1896 {
1897 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1898 if (GET_CODE (op0) == code)
1899 {
1900 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1901 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1902 }
1903
1904 /* "a op (b op c)" becomes "(b op c) op a". */
1905 if (! swap_commutative_operands_p (op1, op0))
1906 return simplify_gen_binary (code, mode, op1, op0);
1907
1908 tem = op0;
1909 op0 = op1;
1910 op1 = tem;
1911 }
1912
1913 if (GET_CODE (op0) == code)
1914 {
1915 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1916 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1917 {
1918 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1919 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1920 }
1921
1922 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1923 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1924 if (tem != 0)
1925 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1926
1927 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1928 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1929 if (tem != 0)
1930 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1931 }
1932
1933 return 0;
1934 }
1935
1936
1937 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1938 and OP1. Return 0 if no simplification is possible.
1939
1940 Don't use this for relational operations such as EQ or LT.
1941 Use simplify_relational_operation instead. */
1942 rtx
1943 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1945 {
1946 rtx trueop0, trueop1;
1947 rtx tem;
1948
1949 /* Relational operations don't work here. We must know the mode
1950 of the operands in order to do the comparison correctly.
1951 Assuming a full word can give incorrect results.
1952 Consider comparing 128 with -128 in QImode. */
1953 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1954 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1955
1956 /* Make sure the constant is second. */
1957 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1958 && swap_commutative_operands_p (op0, op1))
1959 {
1960 tem = op0, op0 = op1, op1 = tem;
1961 }
1962
1963 trueop0 = avoid_constant_pool_reference (op0);
1964 trueop1 = avoid_constant_pool_reference (op1);
1965
1966 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1967 if (tem)
1968 return tem;
1969 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1970 }
1971
1972 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1973 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1974 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1975 actual constants. */
1976
1977 static rtx
1978 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1979 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1980 {
1981 rtx tem, reversed, opleft, opright;
1982 HOST_WIDE_INT val;
1983 unsigned int width = GET_MODE_PRECISION (mode);
1984
1985 /* Even if we can't compute a constant result,
1986 there are some cases worth simplifying. */
1987
1988 switch (code)
1989 {
1990 case PLUS:
1991 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1992 when x is NaN, infinite, or finite and nonzero. They aren't
1993 when x is -0 and the rounding mode is not towards -infinity,
1994 since (-0) + 0 is then 0. */
1995 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1996 return op0;
1997
1998 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1999 transformations are safe even for IEEE. */
2000 if (GET_CODE (op0) == NEG)
2001 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2002 else if (GET_CODE (op1) == NEG)
2003 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2004
2005 /* (~a) + 1 -> -a */
2006 if (INTEGRAL_MODE_P (mode)
2007 && GET_CODE (op0) == NOT
2008 && trueop1 == const1_rtx)
2009 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2010
2011 /* Handle both-operands-constant cases. We can only add
2012 CONST_INTs to constants since the sum of relocatable symbols
2013 can't be handled by most assemblers. Don't add CONST_INT
2014 to CONST_INT since overflow won't be computed properly if wider
2015 than HOST_BITS_PER_WIDE_INT. */
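      /* For example, (plus (symbol_ref "foo") (const_int 16)) is folded
	 by plus_constant into the canonical
	 (const (plus (symbol_ref "foo") (const_int 16))).  */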
2016
2017 if ((GET_CODE (op0) == CONST
2018 || GET_CODE (op0) == SYMBOL_REF
2019 || GET_CODE (op0) == LABEL_REF)
2020 && CONST_INT_P (op1))
2021 return plus_constant (mode, op0, INTVAL (op1));
2022 else if ((GET_CODE (op1) == CONST
2023 || GET_CODE (op1) == SYMBOL_REF
2024 || GET_CODE (op1) == LABEL_REF)
2025 && CONST_INT_P (op0))
2026 return plus_constant (mode, op1, INTVAL (op0));
2027
2028 /* See if this is something like X * C - X or vice versa or
2029 if the multiplication is written as a shift. If so, we can
2030 distribute and make a new multiply, shift, or maybe just
2031 have X (if C is 2 in the example above). But don't make
2032 something more expensive than we had before. */
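      /* For example, (plus (mult x (const_int 6)) x) distributes to
	 (mult x (const_int 7)), and (plus (ashift x (const_int 2)) x)
	 to (mult x (const_int 5)), provided the result is not costlier
	 than the original form.  */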
2033
2034 if (SCALAR_INT_MODE_P (mode))
2035 {
2036 rtx lhs = op0, rhs = op1;
2037
2038 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2039 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2040
2041 if (GET_CODE (lhs) == NEG)
2042 {
2043 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2044 lhs = XEXP (lhs, 0);
2045 }
2046 else if (GET_CODE (lhs) == MULT
2047 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2048 {
2049 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2050 lhs = XEXP (lhs, 0);
2051 }
2052 else if (GET_CODE (lhs) == ASHIFT
2053 && CONST_INT_P (XEXP (lhs, 1))
2054 && INTVAL (XEXP (lhs, 1)) >= 0
2055 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2056 {
2057 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2058 GET_MODE_PRECISION (mode));
2059 lhs = XEXP (lhs, 0);
2060 }
2061
2062 if (GET_CODE (rhs) == NEG)
2063 {
2064 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2065 rhs = XEXP (rhs, 0);
2066 }
2067 else if (GET_CODE (rhs) == MULT
2068 && CONST_INT_P (XEXP (rhs, 1)))
2069 {
2070 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2071 rhs = XEXP (rhs, 0);
2072 }
2073 else if (GET_CODE (rhs) == ASHIFT
2074 && CONST_INT_P (XEXP (rhs, 1))
2075 && INTVAL (XEXP (rhs, 1)) >= 0
2076 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2077 {
2078 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2079 GET_MODE_PRECISION (mode));
2080 rhs = XEXP (rhs, 0);
2081 }
2082
2083 if (rtx_equal_p (lhs, rhs))
2084 {
2085 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2086 rtx coeff;
2087 bool speed = optimize_function_for_speed_p (cfun);
2088
2089 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2090
2091 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2092 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2093 ? tem : 0;
2094 }
2095 }
2096
2097       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is the signbit.  */
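      /* E.g. in SImode, (plus (xor x (const_int 0x12)) (const_int 0x80000000))
	 becomes (xor x (const_int 0x80000012)); adding the sign bit is the
	 same as flipping it.  */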
2098 if (CONST_SCALAR_INT_P (op1)
2099 && GET_CODE (op0) == XOR
2100 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2101 && mode_signbit_p (mode, op1))
2102 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2103 simplify_gen_binary (XOR, mode, op1,
2104 XEXP (op0, 1)));
2105
2106 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2107 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2108 && GET_CODE (op0) == MULT
2109 && GET_CODE (XEXP (op0, 0)) == NEG)
2110 {
2111 rtx in1, in2;
2112
2113 in1 = XEXP (XEXP (op0, 0), 0);
2114 in2 = XEXP (op0, 1);
2115 return simplify_gen_binary (MINUS, mode, op1,
2116 simplify_gen_binary (MULT, mode,
2117 in1, in2));
2118 }
2119
2120 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2121 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2122 is 1. */
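      /* For example, with STORE_FLAG_VALUE == 1,
	 (plus (eq x y) (const_int -1)) becomes (neg (ne x y)):
	 both are 0 when x == y and -1 otherwise.  */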
2123 if (COMPARISON_P (op0)
2124 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2125 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2126 && (reversed = reversed_comparison (op0, mode)))
2127 return
2128 simplify_gen_unary (NEG, mode, reversed, mode);
2129
2130 /* If one of the operands is a PLUS or a MINUS, see if we can
2131 simplify this by the associative law.
2132 Don't use the associative law for floating point.
2133 The inaccuracy makes it nonassociative,
2134 and subtle programs can break if operations are associated. */
2135
2136 if (INTEGRAL_MODE_P (mode)
2137 && (plus_minus_operand_p (op0)
2138 || plus_minus_operand_p (op1))
2139 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2140 return tem;
2141
2142 /* Reassociate floating point addition only when the user
2143 specifies associative math operations. */
2144 if (FLOAT_MODE_P (mode)
2145 && flag_associative_math)
2146 {
2147 tem = simplify_associative_operation (code, mode, op0, op1);
2148 if (tem)
2149 return tem;
2150 }
2151 break;
2152
2153 case COMPARE:
2154 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2155 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2156 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2157 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2158 {
2159 rtx xop00 = XEXP (op0, 0);
2160 rtx xop10 = XEXP (op1, 0);
2161
2162 #ifdef HAVE_cc0
2163 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2164 #else
2165 if (REG_P (xop00) && REG_P (xop10)
2166 && GET_MODE (xop00) == GET_MODE (xop10)
2167 && REGNO (xop00) == REGNO (xop10)
2168 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2169 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2170 #endif
2171 return xop00;
2172 }
2173 break;
2174
2175 case MINUS:
2176 /* We can't assume x-x is 0 even with non-IEEE floating point,
2177 but since it is zero except in very strange circumstances, we
2178 will treat it as zero with -ffinite-math-only. */
2179 if (rtx_equal_p (trueop0, trueop1)
2180 && ! side_effects_p (op0)
2181 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2182 return CONST0_RTX (mode);
2183
2184 /* Change subtraction from zero into negation. (0 - x) is the
2185 same as -x when x is NaN, infinite, or finite and nonzero.
2186 But if the mode has signed zeros, and does not round towards
2187 -infinity, then 0 - 0 is 0, not -0. */
2188 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2189 return simplify_gen_unary (NEG, mode, op1, mode);
2190
2191 /* (-1 - a) is ~a. */
2192 if (trueop0 == constm1_rtx)
2193 return simplify_gen_unary (NOT, mode, op1, mode);
2194
2195 /* Subtracting 0 has no effect unless the mode has signed zeros
2196 and supports rounding towards -infinity. In such a case,
2197 0 - 0 is -0. */
2198 if (!(HONOR_SIGNED_ZEROS (mode)
2199 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2200 && trueop1 == CONST0_RTX (mode))
2201 return op0;
2202
2203 /* See if this is something like X * C - X or vice versa or
2204 if the multiplication is written as a shift. If so, we can
2205 distribute and make a new multiply, shift, or maybe just
2206 have X (if C is 2 in the example above). But don't make
2207 something more expensive than we had before. */
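      /* For example, (minus (mult x (const_int 4)) x) distributes to
	 (mult x (const_int 3)), provided the result is not costlier
	 than the original form.  */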
2208
2209 if (SCALAR_INT_MODE_P (mode))
2210 {
2211 rtx lhs = op0, rhs = op1;
2212
2213 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2214 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2215
2216 if (GET_CODE (lhs) == NEG)
2217 {
2218 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2219 lhs = XEXP (lhs, 0);
2220 }
2221 else if (GET_CODE (lhs) == MULT
2222 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2223 {
2224 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2225 lhs = XEXP (lhs, 0);
2226 }
2227 else if (GET_CODE (lhs) == ASHIFT
2228 && CONST_INT_P (XEXP (lhs, 1))
2229 && INTVAL (XEXP (lhs, 1)) >= 0
2230 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2231 {
2232 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2233 GET_MODE_PRECISION (mode));
2234 lhs = XEXP (lhs, 0);
2235 }
2236
2237 if (GET_CODE (rhs) == NEG)
2238 {
2239 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2240 rhs = XEXP (rhs, 0);
2241 }
2242 else if (GET_CODE (rhs) == MULT
2243 && CONST_INT_P (XEXP (rhs, 1)))
2244 {
2245 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2246 rhs = XEXP (rhs, 0);
2247 }
2248 else if (GET_CODE (rhs) == ASHIFT
2249 && CONST_INT_P (XEXP (rhs, 1))
2250 && INTVAL (XEXP (rhs, 1)) >= 0
2251 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2252 {
2253 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2254 GET_MODE_PRECISION (mode));
2255 negcoeff1 = -negcoeff1;
2256 rhs = XEXP (rhs, 0);
2257 }
2258
2259 if (rtx_equal_p (lhs, rhs))
2260 {
2261 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2262 rtx coeff;
2263 bool speed = optimize_function_for_speed_p (cfun);
2264
2265 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2266
2267 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2268 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2269 ? tem : 0;
2270 }
2271 }
2272
2273 /* (a - (-b)) -> (a + b). True even for IEEE. */
2274 if (GET_CODE (op1) == NEG)
2275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2276
2277 /* (-x - c) may be simplified as (-c - x). */
2278 if (GET_CODE (op0) == NEG
2279 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2280 {
2281 tem = simplify_unary_operation (NEG, mode, op1, mode);
2282 if (tem)
2283 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2284 }
2285
2286 /* Don't let a relocatable value get a negative coeff. */
2287 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2288 return simplify_gen_binary (PLUS, mode,
2289 op0,
2290 neg_const_int (mode, op1));
2291
2292 /* (x - (x & y)) -> (x & ~y) */
2293 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2294 {
2295 if (rtx_equal_p (op0, XEXP (op1, 0)))
2296 {
2297 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2298 GET_MODE (XEXP (op1, 1)));
2299 return simplify_gen_binary (AND, mode, op0, tem);
2300 }
2301 if (rtx_equal_p (op0, XEXP (op1, 1)))
2302 {
2303 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2304 GET_MODE (XEXP (op1, 0)));
2305 return simplify_gen_binary (AND, mode, op0, tem);
2306 }
2307 }
2308
2309 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2310 by reversing the comparison code if valid. */
2311 if (STORE_FLAG_VALUE == 1
2312 && trueop0 == const1_rtx
2313 && COMPARISON_P (op1)
2314 && (reversed = reversed_comparison (op1, mode)))
2315 return reversed;
2316
2317 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2318 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2319 && GET_CODE (op1) == MULT
2320 && GET_CODE (XEXP (op1, 0)) == NEG)
2321 {
2322 rtx in1, in2;
2323
2324 in1 = XEXP (XEXP (op1, 0), 0);
2325 in2 = XEXP (op1, 1);
2326 return simplify_gen_binary (PLUS, mode,
2327 simplify_gen_binary (MULT, mode,
2328 in1, in2),
2329 op0);
2330 }
2331
2332 /* Canonicalize (minus (neg A) (mult B C)) to
2333 (minus (mult (neg B) C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2335 && GET_CODE (op1) == MULT
2336 && GET_CODE (op0) == NEG)
2337 {
2338 rtx in1, in2;
2339
2340 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2341 in2 = XEXP (op1, 1);
2342 return simplify_gen_binary (MINUS, mode,
2343 simplify_gen_binary (MULT, mode,
2344 in1, in2),
2345 XEXP (op0, 0));
2346 }
2347
2348 /* If one of the operands is a PLUS or a MINUS, see if we can
2349 simplify this by the associative law. This will, for example,
2350 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2351 Don't use the associative law for floating point.
2352 The inaccuracy makes it nonassociative,
2353 and subtle programs can break if operations are associated. */
2354
2355 if (INTEGRAL_MODE_P (mode)
2356 && (plus_minus_operand_p (op0)
2357 || plus_minus_operand_p (op1))
2358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2359 return tem;
2360 break;
2361
2362 case MULT:
2363 if (trueop1 == constm1_rtx)
2364 return simplify_gen_unary (NEG, mode, op0, mode);
2365
2366 if (GET_CODE (op0) == NEG)
2367 {
2368 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2369 	  /* If op1 is a MULT as well and simplify_unary_operation
2370 	     just moved the NEG to the second operand, simplify_gen_binary
2371 	     below could, through simplify_associative_operation, move
2372 	     the NEG around again and recurse endlessly.  */
2373 if (temp
2374 && GET_CODE (op1) == MULT
2375 && GET_CODE (temp) == MULT
2376 && XEXP (op1, 0) == XEXP (temp, 0)
2377 && GET_CODE (XEXP (temp, 1)) == NEG
2378 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2379 temp = NULL_RTX;
2380 if (temp)
2381 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2382 }
2383 if (GET_CODE (op1) == NEG)
2384 {
2385 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2386 	  /* If op0 is a MULT as well and simplify_unary_operation
2387 	     just moved the NEG to the second operand, simplify_gen_binary
2388 	     below could, through simplify_associative_operation, move
2389 	     the NEG around again and recurse endlessly.  */
2390 if (temp
2391 && GET_CODE (op0) == MULT
2392 && GET_CODE (temp) == MULT
2393 && XEXP (op0, 0) == XEXP (temp, 0)
2394 && GET_CODE (XEXP (temp, 1)) == NEG
2395 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2396 temp = NULL_RTX;
2397 if (temp)
2398 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2399 }
2400
2401 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2402 x is NaN, since x * 0 is then also NaN. Nor is it valid
2403 when the mode has signed zeros, since multiplying a negative
2404 number by 0 will give -0, not 0. */
2405 if (!HONOR_NANS (mode)
2406 && !HONOR_SIGNED_ZEROS (mode)
2407 && trueop1 == CONST0_RTX (mode)
2408 && ! side_effects_p (op0))
2409 return op1;
2410
2411 /* In IEEE floating point, x*1 is not equivalent to x for
2412 signalling NaNs. */
2413 if (!HONOR_SNANS (mode)
2414 && trueop1 == CONST1_RTX (mode))
2415 return op0;
2416
2417 /* Convert multiply by constant power of two into shift. */
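      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */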
2418 if (CONST_SCALAR_INT_P (trueop1))
2419 {
2420 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2421 if (val >= 0)
2422 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2423 }
2424
2425 /* x*2 is x+x and x*(-1) is -x */
2426 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2428 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2429 && GET_MODE (op0) == mode)
2430 {
2431 REAL_VALUE_TYPE d;
2432 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2433
2434 if (REAL_VALUES_EQUAL (d, dconst2))
2435 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2436
2437 if (!HONOR_SNANS (mode)
2438 && REAL_VALUES_EQUAL (d, dconstm1))
2439 return simplify_gen_unary (NEG, mode, op0, mode);
2440 }
2441
2442 /* Optimize -x * -x as x * x. */
2443 if (FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == NEG
2445 && GET_CODE (op1) == NEG
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2449
2450 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2451 if (SCALAR_FLOAT_MODE_P (mode)
2452 && GET_CODE (op0) == ABS
2453 && GET_CODE (op1) == ABS
2454 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2455 && !side_effects_p (XEXP (op0, 0)))
2456 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2457
2458 /* Reassociate multiplication, but for floating point MULTs
2459 only when the user specifies unsafe math optimizations. */
2460 if (! FLOAT_MODE_P (mode)
2461 || flag_unsafe_math_optimizations)
2462 {
2463 tem = simplify_associative_operation (code, mode, op0, op1);
2464 if (tem)
2465 return tem;
2466 }
2467 break;
2468
2469 case IOR:
2470 if (trueop1 == CONST0_RTX (mode))
2471 return op0;
2472 if (INTEGRAL_MODE_P (mode)
2473 && trueop1 == CONSTM1_RTX (mode)
2474 && !side_effects_p (op0))
2475 return op1;
2476 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2477 return op0;
2478 /* A | (~A) -> -1 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && SCALAR_INT_MODE_P (mode))
2483 return constm1_rtx;
2484
2485 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2486 if (CONST_INT_P (op1)
2487 && HWI_COMPUTABLE_MODE_P (mode)
2488 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2489 && !side_effects_p (op0))
2490 return op1;
2491
2492 /* Canonicalize (X & C1) | C2. */
2493 if (GET_CODE (op0) == AND
2494 && CONST_INT_P (trueop1)
2495 && CONST_INT_P (XEXP (op0, 1)))
2496 {
2497 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2498 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2499 HOST_WIDE_INT c2 = INTVAL (trueop1);
2500
2501 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2502 if ((c1 & c2) == c1
2503 && !side_effects_p (XEXP (op0, 0)))
2504 return trueop1;
2505
2506 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2507 if (((c1|c2) & mask) == mask)
2508 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2509
2510 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2511 if (((c1 & ~c2) & mask) != (c1 & mask))
2512 {
2513 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2514 gen_int_mode (c1 & ~c2, mode));
2515 return simplify_gen_binary (IOR, mode, tem, op1);
2516 }
2517 }
2518
2519 /* Convert (A & B) | A to A. */
2520 if (GET_CODE (op0) == AND
2521 && (rtx_equal_p (XEXP (op0, 0), op1)
2522 || rtx_equal_p (XEXP (op0, 1), op1))
2523 && ! side_effects_p (XEXP (op0, 0))
2524 && ! side_effects_p (XEXP (op0, 1)))
2525 return op1;
2526
2527 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2528 mode size to (rotate A CX). */
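      /* E.g. in SImode, (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
	 becomes (rotate x (const_int 8)).  */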
2529
2530 if (GET_CODE (op1) == ASHIFT
2531 || GET_CODE (op1) == SUBREG)
2532 {
2533 opleft = op1;
2534 opright = op0;
2535 }
2536 else
2537 {
2538 opright = op1;
2539 opleft = op0;
2540 }
2541
2542 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2543 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2544 && CONST_INT_P (XEXP (opleft, 1))
2545 && CONST_INT_P (XEXP (opright, 1))
2546 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2547 == GET_MODE_PRECISION (mode)))
2548 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2549
2550 /* Same, but for ashift that has been "simplified" to a wider mode
2551 by simplify_shift_const. */
2552
2553 if (GET_CODE (opleft) == SUBREG
2554 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2555 && GET_CODE (opright) == LSHIFTRT
2556 && GET_CODE (XEXP (opright, 0)) == SUBREG
2557 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2558 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2559 && (GET_MODE_SIZE (GET_MODE (opleft))
2560 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2561 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2562 SUBREG_REG (XEXP (opright, 0)))
2563 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2564 && CONST_INT_P (XEXP (opright, 1))
2565 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2566 == GET_MODE_PRECISION (mode)))
2567 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2568 XEXP (SUBREG_REG (opleft), 1));
2569
2570       /* If we have (ior (and X C1) C2), simplify this by making
2571 	 C1 as small as possible if C1 actually changes.  */
2572 if (CONST_INT_P (op1)
2573 && (HWI_COMPUTABLE_MODE_P (mode)
2574 || INTVAL (op1) > 0)
2575 && GET_CODE (op0) == AND
2576 && CONST_INT_P (XEXP (op0, 1))
2577 && CONST_INT_P (op1)
2578 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2579 {
2580 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2581 gen_int_mode (UINTVAL (XEXP (op0, 1))
2582 & ~UINTVAL (op1),
2583 mode));
2584 return simplify_gen_binary (IOR, mode, tmp, op1);
2585 }
2586
2587 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2588 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2589 the PLUS does not affect any of the bits in OP1: then we can do
2590 the IOR as a PLUS and we can associate. This is valid if OP1
2591 can be safely shifted left C bits. */
2592 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2593 && GET_CODE (XEXP (op0, 0)) == PLUS
2594 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2595 && CONST_INT_P (XEXP (op0, 1))
2596 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2597 {
2598 int count = INTVAL (XEXP (op0, 1));
2599 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2600
2601 if (mask >> count == INTVAL (trueop1)
2602 && trunc_int_for_mode (mask, mode) == mask
2603 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2604 return simplify_gen_binary (ASHIFTRT, mode,
2605 plus_constant (mode, XEXP (op0, 0),
2606 mask),
2607 XEXP (op0, 1));
2608 }
2609
2610 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2611 if (tem)
2612 return tem;
2613
2614 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617 break;
2618
2619 case XOR:
2620 if (trueop1 == CONST0_RTX (mode))
2621 return op0;
2622 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2623 return simplify_gen_unary (NOT, mode, op0, mode);
2624 if (rtx_equal_p (trueop0, trueop1)
2625 && ! side_effects_p (op0)
2626 && GET_MODE_CLASS (mode) != MODE_CC)
2627 return CONST0_RTX (mode);
2628
2629 /* Canonicalize XOR of the most significant bit to PLUS. */
2630 if (CONST_SCALAR_INT_P (op1)
2631 && mode_signbit_p (mode, op1))
2632 return simplify_gen_binary (PLUS, mode, op0, op1);
2633 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2634 if (CONST_SCALAR_INT_P (op1)
2635 && GET_CODE (op0) == PLUS
2636 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2637 && mode_signbit_p (mode, XEXP (op0, 1)))
2638 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2639 simplify_gen_binary (XOR, mode, op1,
2640 XEXP (op0, 1)));
2641
2642 /* If we are XORing two things that have no bits in common,
2643 convert them into an IOR. This helps to detect rotation encoded
2644 using those methods and possibly other simplifications. */
2645
2646 if (HWI_COMPUTABLE_MODE_P (mode)
2647 && (nonzero_bits (op0, mode)
2648 & nonzero_bits (op1, mode)) == 0)
2649 return (simplify_gen_binary (IOR, mode, op0, op1));
2650
2651 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2652 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2653 (NOT y). */
2654 {
2655 int num_negated = 0;
2656
2657 if (GET_CODE (op0) == NOT)
2658 num_negated++, op0 = XEXP (op0, 0);
2659 if (GET_CODE (op1) == NOT)
2660 num_negated++, op1 = XEXP (op1, 0);
2661
2662 if (num_negated == 2)
2663 return simplify_gen_binary (XOR, mode, op0, op1);
2664 else if (num_negated == 1)
2665 return simplify_gen_unary (NOT, mode,
2666 simplify_gen_binary (XOR, mode, op0, op1),
2667 mode);
2668 }
2669
2670 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2671 correspond to a machine insn or result in further simplifications
2672 if B is a constant. */
2673
2674 if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 1), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 0), mode),
2680 op1);
2681
2682 else if (GET_CODE (op0) == AND
2683 && rtx_equal_p (XEXP (op0, 0), op1)
2684 && ! side_effects_p (op1))
2685 return simplify_gen_binary (AND, mode,
2686 simplify_gen_unary (NOT, mode,
2687 XEXP (op0, 1), mode),
2688 op1);
2689
2690 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2691 we can transform like this:
2692 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2693 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2694 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2695 Attempt a few simplifications when B and C are both constants. */
2696 if (GET_CODE (op0) == AND
2697 && CONST_INT_P (op1)
2698 && CONST_INT_P (XEXP (op0, 1)))
2699 {
2700 rtx a = XEXP (op0, 0);
2701 rtx b = XEXP (op0, 1);
2702 rtx c = op1;
2703 HOST_WIDE_INT bval = INTVAL (b);
2704 HOST_WIDE_INT cval = INTVAL (c);
2705
2706 rtx na_c
2707 = simplify_binary_operation (AND, mode,
2708 simplify_gen_unary (NOT, mode, a, mode),
2709 c);
2710 if ((~cval & bval) == 0)
2711 {
2712 /* Try to simplify ~A&C | ~B&C. */
2713 if (na_c != NULL_RTX)
2714 return simplify_gen_binary (IOR, mode, na_c,
2715 gen_int_mode (~bval & cval, mode));
2716 }
2717 else
2718 {
2719 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2720 if (na_c == const0_rtx)
2721 {
2722 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2723 gen_int_mode (~cval & bval,
2724 mode));
2725 return simplify_gen_binary (IOR, mode, a_nc_b,
2726 gen_int_mode (~bval & cval,
2727 mode));
2728 }
2729 }
2730 }
2731
2732 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2733 comparison if STORE_FLAG_VALUE is 1. */
2734 if (STORE_FLAG_VALUE == 1
2735 && trueop1 == const1_rtx
2736 && COMPARISON_P (op0)
2737 && (reversed = reversed_comparison (op0, mode)))
2738 return reversed;
2739
2740 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2741 is (lt foo (const_int 0)), so we can perform the above
2742 simplification if STORE_FLAG_VALUE is 1. */
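      /* E.g. in SImode, (xor (lshiftrt x (const_int 31)) (const_int 1))
	 becomes (ge x (const_int 0)).  */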
2743
2744 if (STORE_FLAG_VALUE == 1
2745 && trueop1 == const1_rtx
2746 && GET_CODE (op0) == LSHIFTRT
2747 && CONST_INT_P (XEXP (op0, 1))
2748 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2749 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2750
2751       /* Likewise, (xor (comparison foo bar) (const_int sign-bit)) can
2752 	 become the reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
2753 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2754 && trueop1 == const_true_rtx
2755 && COMPARISON_P (op0)
2756 && (reversed = reversed_comparison (op0, mode)))
2757 return reversed;
2758
2759 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2760 if (tem)
2761 return tem;
2762
2763 tem = simplify_associative_operation (code, mode, op0, op1);
2764 if (tem)
2765 return tem;
2766 break;
2767
2768 case AND:
2769 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2770 return trueop1;
2771 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2772 return op0;
2773 if (HWI_COMPUTABLE_MODE_P (mode))
2774 {
2775 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2776 HOST_WIDE_INT nzop1;
2777 if (CONST_INT_P (trueop1))
2778 {
2779 HOST_WIDE_INT val1 = INTVAL (trueop1);
2780 /* If we are turning off bits already known off in OP0, we need
2781 not do an AND. */
2782 if ((nzop0 & ~val1) == 0)
2783 return op0;
2784 }
2785 nzop1 = nonzero_bits (trueop1, mode);
2786 /* If we are clearing all the nonzero bits, the result is zero. */
2787 if ((nzop1 & nzop0) == 0
2788 && !side_effects_p (op0) && !side_effects_p (op1))
2789 return CONST0_RTX (mode);
2790 }
2791 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2792 && GET_MODE_CLASS (mode) != MODE_CC)
2793 return op0;
2794 /* A & (~A) -> 0 */
2795 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2796 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2797 && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return CONST0_RTX (mode);
2800
2801 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2802 there are no nonzero bits of C outside of X's mode. */
2803 if ((GET_CODE (op0) == SIGN_EXTEND
2804 || GET_CODE (op0) == ZERO_EXTEND)
2805 && CONST_INT_P (trueop1)
2806 && HWI_COMPUTABLE_MODE_P (mode)
2807 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2808 & UINTVAL (trueop1)) == 0)
2809 {
2810 machine_mode imode = GET_MODE (XEXP (op0, 0));
2811 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2812 gen_int_mode (INTVAL (trueop1),
2813 imode));
2814 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2815 }
2816
2817 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2818 we might be able to further simplify the AND with X and potentially
2819 remove the truncation altogether. */
2820 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2821 {
2822 rtx x = XEXP (op0, 0);
2823 machine_mode xmode = GET_MODE (x);
2824 tem = simplify_gen_binary (AND, xmode, x,
2825 gen_int_mode (INTVAL (trueop1), xmode));
2826 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2827 }
2828
2829 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2830 if (GET_CODE (op0) == IOR
2831 && CONST_INT_P (trueop1)
2832 && CONST_INT_P (XEXP (op0, 1)))
2833 {
2834 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2835 return simplify_gen_binary (IOR, mode,
2836 simplify_gen_binary (AND, mode,
2837 XEXP (op0, 0), op1),
2838 gen_int_mode (tmp, mode));
2839 }
2840
2841 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2842 insn (and may simplify more). */
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 0), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 1), mode),
2849 op1);
2850
2851 if (GET_CODE (op0) == XOR
2852 && rtx_equal_p (XEXP (op0, 1), op1)
2853 && ! side_effects_p (op1))
2854 return simplify_gen_binary (AND, mode,
2855 simplify_gen_unary (NOT, mode,
2856 XEXP (op0, 0), mode),
2857 op1);
2858
2859 /* Similarly for (~(A ^ B)) & A. */
2860 if (GET_CODE (op0) == NOT
2861 && GET_CODE (XEXP (op0, 0)) == XOR
2862 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2863 && ! side_effects_p (op1))
2864 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2865
2866 if (GET_CODE (op0) == NOT
2867 && GET_CODE (XEXP (op0, 0)) == XOR
2868 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2869 && ! side_effects_p (op1))
2870 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2871
2872 /* Convert (A | B) & A to A. */
2873 if (GET_CODE (op0) == IOR
2874 && (rtx_equal_p (XEXP (op0, 0), op1)
2875 || rtx_equal_p (XEXP (op0, 1), op1))
2876 && ! side_effects_p (XEXP (op0, 0))
2877 && ! side_effects_p (XEXP (op0, 1)))
2878 return op1;
2879
2880 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2881 ((A & N) + B) & M -> (A + B) & M
2882 Similarly if (N & M) == 0,
2883 ((A | N) + B) & M -> (A + B) & M
2884 and for - instead of + and/or ^ instead of |.
2885 Also, if (N & M) == 0, then
2886 (A +- N) & M -> A & M. */
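      /* For example, (and (plus (and x (const_int 0xff)) y) (const_int 15))
	 becomes (and (plus x y) (const_int 15)), since (0xff & 15) == 15.  */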
2887 if (CONST_INT_P (trueop1)
2888 && HWI_COMPUTABLE_MODE_P (mode)
2889 && ~UINTVAL (trueop1)
2890 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2891 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2892 {
2893 rtx pmop[2];
2894 int which;
2895
2896 pmop[0] = XEXP (op0, 0);
2897 pmop[1] = XEXP (op0, 1);
2898
2899 if (CONST_INT_P (pmop[1])
2900 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2901 return simplify_gen_binary (AND, mode, pmop[0], op1);
2902
2903 for (which = 0; which < 2; which++)
2904 {
2905 tem = pmop[which];
2906 switch (GET_CODE (tem))
2907 {
2908 case AND:
2909 if (CONST_INT_P (XEXP (tem, 1))
2910 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2911 == UINTVAL (trueop1))
2912 pmop[which] = XEXP (tem, 0);
2913 break;
2914 case IOR:
2915 case XOR:
2916 if (CONST_INT_P (XEXP (tem, 1))
2917 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2918 pmop[which] = XEXP (tem, 0);
2919 break;
2920 default:
2921 break;
2922 }
2923 }
2924
2925 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2926 {
2927 tem = simplify_gen_binary (GET_CODE (op0), mode,
2928 pmop[0], pmop[1]);
2929 return simplify_gen_binary (code, mode, tem, op1);
2930 }
2931 }
2932
2933       /* (and X (ior (not X) Y)) -> (and X Y) */
2934 if (GET_CODE (op1) == IOR
2935 && GET_CODE (XEXP (op1, 0)) == NOT
2936 && op0 == XEXP (XEXP (op1, 0), 0))
2937 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2938
2939 /* (and (ior (not X) Y) X) -> (and X Y) */
2940 if (GET_CODE (op0) == IOR
2941 && GET_CODE (XEXP (op0, 0)) == NOT
2942 && op1 == XEXP (XEXP (op0, 0), 0))
2943 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2944
2945 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2946 if (tem)
2947 return tem;
2948
2949 tem = simplify_associative_operation (code, mode, op0, op1);
2950 if (tem)
2951 return tem;
2952 break;
2953
2954 case UDIV:
2955 /* 0/x is 0 (or x&0 if x has side-effects). */
2956 if (trueop0 == CONST0_RTX (mode))
2957 {
2958 if (side_effects_p (op1))
2959 return simplify_gen_binary (AND, mode, op1, trueop0);
2960 return trueop0;
2961 }
2962 /* x/1 is x. */
2963 if (trueop1 == CONST1_RTX (mode))
2964 {
2965 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2966 if (tem)
2967 return tem;
2968 }
2969 /* Convert divide by power of two into shift. */
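      /* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */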
2970 if (CONST_INT_P (trueop1)
2971 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2972 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2973 break;
2974
2975 case DIV:
2976 /* Handle floating point and integers separately. */
2977 if (SCALAR_FLOAT_MODE_P (mode))
2978 {
2979 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2980 safe for modes with NaNs, since 0.0 / 0.0 will then be
2981 NaN rather than 0.0. Nor is it safe for modes with signed
2982 zeros, since dividing 0 by a negative number gives -0.0 */
2983 if (trueop0 == CONST0_RTX (mode)
2984 && !HONOR_NANS (mode)
2985 && !HONOR_SIGNED_ZEROS (mode)
2986 && ! side_effects_p (op1))
2987 return op0;
2988 /* x/1.0 is x. */
2989 if (trueop1 == CONST1_RTX (mode)
2990 && !HONOR_SNANS (mode))
2991 return op0;
2992
2993 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2994 && trueop1 != CONST0_RTX (mode))
2995 {
2996 REAL_VALUE_TYPE d;
2997 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2998
2999 /* x/-1.0 is -x. */
3000 if (REAL_VALUES_EQUAL (d, dconstm1)
3001 && !HONOR_SNANS (mode))
3002 return simplify_gen_unary (NEG, mode, op0, mode);
3003
3004 /* Change FP division by a constant into multiplication.
3005 Only do this with -freciprocal-math. */
3006 if (flag_reciprocal_math
3007 && !REAL_VALUES_EQUAL (d, dconst0))
3008 {
3009 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3010 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3011 return simplify_gen_binary (MULT, mode, op0, tem);
3012 }
3013 }
3014 }
3015 else if (SCALAR_INT_MODE_P (mode))
3016 {
3017 /* 0/x is 0 (or x&0 if x has side-effects). */
3018 if (trueop0 == CONST0_RTX (mode)
3019 && !cfun->can_throw_non_call_exceptions)
3020 {
3021 if (side_effects_p (op1))
3022 return simplify_gen_binary (AND, mode, op1, trueop0);
3023 return trueop0;
3024 }
3025 /* x/1 is x. */
3026 if (trueop1 == CONST1_RTX (mode))
3027 {
3028 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3029 if (tem)
3030 return tem;
3031 }
3032 /* x/-1 is -x. */
3033 if (trueop1 == constm1_rtx)
3034 {
3035 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3036 if (x)
3037 return simplify_gen_unary (NEG, mode, x, mode);
3038 }
3039 }
3040 break;
3041
3042 case UMOD:
3043 /* 0%x is 0 (or x&0 if x has side-effects). */
3044 if (trueop0 == CONST0_RTX (mode))
3045 {
3046 if (side_effects_p (op1))
3047 return simplify_gen_binary (AND, mode, op1, trueop0);
3048 return trueop0;
3049 }
3050       /* x%1 is 0 (or x&0 if x has side-effects).  */
3051 if (trueop1 == CONST1_RTX (mode))
3052 {
3053 if (side_effects_p (op0))
3054 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3055 return CONST0_RTX (mode);
3056 }
3057 /* Implement modulus by power of two as AND. */
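      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */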
3058 if (CONST_INT_P (trueop1)
3059 && exact_log2 (UINTVAL (trueop1)) > 0)
3060 return simplify_gen_binary (AND, mode, op0,
3061 gen_int_mode (INTVAL (op1) - 1, mode));
3062 break;
3063
3064 case MOD:
3065 /* 0%x is 0 (or x&0 if x has side-effects). */
3066 if (trueop0 == CONST0_RTX (mode))
3067 {
3068 if (side_effects_p (op1))
3069 return simplify_gen_binary (AND, mode, op1, trueop0);
3070 return trueop0;
3071 }
3072 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3073 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3074 {
3075 if (side_effects_p (op0))
3076 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3077 return CONST0_RTX (mode);
3078 }
3079 break;
3080
3081 case ROTATERT:
3082 case ROTATE:
3083       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3084 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3085 	 bitsize - 1, use the other direction of rotate with an amount of
3086 	 1 .. bitsize / 2 - 1 instead.  */
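      /* E.g. in SImode, when the target has both rotate and rotatert,
	 (rotate x (const_int 24)) is rewritten as (rotatert x (const_int 8)).  */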
3087 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3088 if (CONST_INT_P (trueop1)
3089 && IN_RANGE (INTVAL (trueop1),
3090 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3091 GET_MODE_PRECISION (mode) - 1))
3092 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3093 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3094 - INTVAL (trueop1)));
3095 #endif
3096 /* FALLTHRU */
3097 case ASHIFTRT:
3098 if (trueop1 == CONST0_RTX (mode))
3099 return op0;
3100 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3101 return op0;
3102 /* Rotating ~0 always results in ~0. */
3103 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3104 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3105 && ! side_effects_p (op1))
3106 return op0;
3107 canonicalize_shift:
3108 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3109 {
3110 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3111 if (val != INTVAL (op1))
3112 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3113 }
3114 break;
3115
3116 case ASHIFT:
3117 case SS_ASHIFT:
3118 case US_ASHIFT:
3119 if (trueop1 == CONST0_RTX (mode))
3120 return op0;
3121 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3122 return op0;
3123 goto canonicalize_shift;
3124
3125 case LSHIFTRT:
3126 if (trueop1 == CONST0_RTX (mode))
3127 return op0;
3128 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3129 return op0;
3130 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3131 if (GET_CODE (op0) == CLZ
3132 && CONST_INT_P (trueop1)
3133 && STORE_FLAG_VALUE == 1
3134 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3135 {
3136 machine_mode imode = GET_MODE (XEXP (op0, 0));
3137 unsigned HOST_WIDE_INT zero_val = 0;
3138
3139 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3140 && zero_val == GET_MODE_PRECISION (imode)
3141 && INTVAL (trueop1) == exact_log2 (zero_val))
3142 return simplify_gen_relational (EQ, mode, imode,
3143 XEXP (op0, 0), const0_rtx);
3144 }
3145 goto canonicalize_shift;
3146
3147 case SMIN:
3148 if (width <= HOST_BITS_PER_WIDE_INT
3149 && mode_signbit_p (mode, trueop1)
3150 && ! side_effects_p (op0))
3151 return op1;
3152 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3153 return op0;
3154 tem = simplify_associative_operation (code, mode, op0, op1);
3155 if (tem)
3156 return tem;
3157 break;
3158
3159 case SMAX:
3160 if (width <= HOST_BITS_PER_WIDE_INT
3161 && CONST_INT_P (trueop1)
3162 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3163 && ! side_effects_p (op0))
3164 return op1;
3165 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3166 return op0;
3167 tem = simplify_associative_operation (code, mode, op0, op1);
3168 if (tem)
3169 return tem;
3170 break;
3171
3172 case UMIN:
3173 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3174 return op1;
3175 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3176 return op0;
3177 tem = simplify_associative_operation (code, mode, op0, op1);
3178 if (tem)
3179 return tem;
3180 break;
3181
3182 case UMAX:
3183 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3184 return op1;
3185 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3186 return op0;
3187 tem = simplify_associative_operation (code, mode, op0, op1);
3188 if (tem)
3189 return tem;
3190 break;
3191
3192 case SS_PLUS:
3193 case US_PLUS:
3194 case SS_MINUS:
3195 case US_MINUS:
3196 case SS_MULT:
3197 case US_MULT:
3198 case SS_DIV:
3199 case US_DIV:
3200 /* ??? There are simplifications that can be done. */
3201 return 0;
3202
3203 case VEC_SELECT:
3204 if (!VECTOR_MODE_P (mode))
3205 {
3206 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3207 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3208 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3209 gcc_assert (XVECLEN (trueop1, 0) == 1);
3210 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3211
3212 if (GET_CODE (trueop0) == CONST_VECTOR)
3213 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3214 (trueop1, 0, 0)));
3215
3216 	  /* Extract a scalar element from a nested VEC_SELECT expression
3217 	     (with an optional nested VEC_CONCAT expression).  Some targets
3218 	     (i386) extract a scalar element from a vector using a chain of
3219 	     nested VEC_SELECT expressions.  When the input operand is a
3220 	     memory operand, this operation can be simplified to a simple
3221 	     scalar load from an offset memory address.  */
3222 if (GET_CODE (trueop0) == VEC_SELECT)
3223 {
3224 rtx op0 = XEXP (trueop0, 0);
3225 rtx op1 = XEXP (trueop0, 1);
3226
3227 machine_mode opmode = GET_MODE (op0);
3228 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3229 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3230
3231 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3232 int elem;
3233
3234 rtvec vec;
3235 rtx tmp_op, tmp;
3236
3237 gcc_assert (GET_CODE (op1) == PARALLEL);
3238 gcc_assert (i < n_elts);
3239
3240 	      /* Select the element pointed to by the nested selector.  */
3241 elem = INTVAL (XVECEXP (op1, 0, i));
3242
3243 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3244 if (GET_CODE (op0) == VEC_CONCAT)
3245 {
3246 rtx op00 = XEXP (op0, 0);
3247 rtx op01 = XEXP (op0, 1);
3248
3249 machine_mode mode00, mode01;
3250 int n_elts00, n_elts01;
3251
3252 mode00 = GET_MODE (op00);
3253 mode01 = GET_MODE (op01);
3254
3255 /* Find out number of elements of each operand. */
3256 if (VECTOR_MODE_P (mode00))
3257 {
3258 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3259 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3260 }
3261 else
3262 n_elts00 = 1;
3263
3264 if (VECTOR_MODE_P (mode01))
3265 {
3266 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3267 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3268 }
3269 else
3270 n_elts01 = 1;
3271
3272 gcc_assert (n_elts == n_elts00 + n_elts01);
3273
3274 /* Select correct operand of VEC_CONCAT
3275 and adjust selector. */
3276 if (elem < n_elts01)
3277 tmp_op = op00;
3278 else
3279 {
3280 tmp_op = op01;
3281 elem -= n_elts00;
3282 }
3283 }
3284 else
3285 tmp_op = op0;
3286
3287 vec = rtvec_alloc (1);
3288 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3289
3290 tmp = gen_rtx_fmt_ee (code, mode,
3291 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3292 return tmp;
3293 }
3294 if (GET_CODE (trueop0) == VEC_DUPLICATE
3295 && GET_MODE (XEXP (trueop0, 0)) == mode)
3296 return XEXP (trueop0, 0);
3297 }
3298 else
3299 {
3300 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3301 gcc_assert (GET_MODE_INNER (mode)
3302 == GET_MODE_INNER (GET_MODE (trueop0)));
3303 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3304
3305 if (GET_CODE (trueop0) == CONST_VECTOR)
3306 {
3307 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3308 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3309 rtvec v = rtvec_alloc (n_elts);
3310 unsigned int i;
3311
3312 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3313 for (i = 0; i < n_elts; i++)
3314 {
3315 rtx x = XVECEXP (trueop1, 0, i);
3316
3317 gcc_assert (CONST_INT_P (x));
3318 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3319 INTVAL (x));
3320 }
3321
3322 return gen_rtx_CONST_VECTOR (mode, v);
3323 }
3324
3325 /* Recognize the identity. */
3326 if (GET_MODE (trueop0) == mode)
3327 {
3328 bool maybe_ident = true;
3329 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3330 {
3331 rtx j = XVECEXP (trueop1, 0, i);
3332 if (!CONST_INT_P (j) || INTVAL (j) != i)
3333 {
3334 maybe_ident = false;
3335 break;
3336 }
3337 }
3338 if (maybe_ident)
3339 return trueop0;
3340 }
3341
3342 /* If we build {a,b} then permute it, build the result directly. */
3343 if (XVECLEN (trueop1, 0) == 2
3344 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3345 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3346 && GET_CODE (trueop0) == VEC_CONCAT
3347 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3348 && GET_MODE (XEXP (trueop0, 0)) == mode
3349 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3350 && GET_MODE (XEXP (trueop0, 1)) == mode)
3351 {
3352 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3353 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3354 rtx subop0, subop1;
3355
3356 gcc_assert (i0 < 4 && i1 < 4);
3357 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3358 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3359
3360 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3361 }
3362
3363 if (XVECLEN (trueop1, 0) == 2
3364 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3365 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3366 && GET_CODE (trueop0) == VEC_CONCAT
3367 && GET_MODE (trueop0) == mode)
3368 {
3369 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3370 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3371 rtx subop0, subop1;
3372
3373 gcc_assert (i0 < 2 && i1 < 2);
3374 subop0 = XEXP (trueop0, i0);
3375 subop1 = XEXP (trueop0, i1);
3376
3377 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3378 }
3379
3380 /* If we select one half of a vec_concat, return that. */
3381 if (GET_CODE (trueop0) == VEC_CONCAT
3382 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3383 {
3384 rtx subop0 = XEXP (trueop0, 0);
3385 rtx subop1 = XEXP (trueop0, 1);
3386 machine_mode mode0 = GET_MODE (subop0);
3387 machine_mode mode1 = GET_MODE (subop1);
3388 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3389 int l0 = GET_MODE_SIZE (mode0) / li;
3390 int l1 = GET_MODE_SIZE (mode1) / li;
3391 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3392 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3393 {
3394 bool success = true;
3395 for (int i = 1; i < l0; ++i)
3396 {
3397 rtx j = XVECEXP (trueop1, 0, i);
3398 if (!CONST_INT_P (j) || INTVAL (j) != i)
3399 {
3400 success = false;
3401 break;
3402 }
3403 }
3404 if (success)
3405 return subop0;
3406 }
3407 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3408 {
3409 bool success = true;
3410 for (int i = 1; i < l1; ++i)
3411 {
3412 rtx j = XVECEXP (trueop1, 0, i);
3413 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3414 {
3415 success = false;
3416 break;
3417 }
3418 }
3419 if (success)
3420 return subop1;
3421 }
3422 }
3423 }
3424
3425 if (XVECLEN (trueop1, 0) == 1
3426 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3427 && GET_CODE (trueop0) == VEC_CONCAT)
3428 {
3429 rtx vec = trueop0;
3430 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3431
3432 /* Try to find the element in the VEC_CONCAT. */
3433 while (GET_MODE (vec) != mode
3434 && GET_CODE (vec) == VEC_CONCAT)
3435 {
3436 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3437 if (offset < vec_size)
3438 vec = XEXP (vec, 0);
3439 else
3440 {
3441 offset -= vec_size;
3442 vec = XEXP (vec, 1);
3443 }
3444 vec = avoid_constant_pool_reference (vec);
3445 }
3446
3447 if (GET_MODE (vec) == mode)
3448 return vec;
3449 }
3450
3451 /* If we select elements in a vec_merge that all come from the same
3452 operand, select from that operand directly. */
3453 if (GET_CODE (op0) == VEC_MERGE)
3454 {
3455 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3456 if (CONST_INT_P (trueop02))
3457 {
3458 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3459 bool all_operand0 = true;
3460 bool all_operand1 = true;
3461 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3462 {
3463 rtx j = XVECEXP (trueop1, 0, i);
3464 if (sel & (1 << UINTVAL (j)))
3465 all_operand1 = false;
3466 else
3467 all_operand0 = false;
3468 }
3469 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3470 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3471 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3472 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3473 }
3474 }
3475
3476 /* If we have two nested selects that are inverses of each
3477 other, replace them with the source operand. */
3478 if (GET_CODE (trueop0) == VEC_SELECT
3479 && GET_MODE (XEXP (trueop0, 0)) == mode)
3480 {
3481 rtx op0_subop1 = XEXP (trueop0, 1);
3482 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3483 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3484
3485 /* Apply the outer ordering vector to the inner one. (The inner
3486 ordering vector is expressly permitted to be of a different
3487 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3488 then the two VEC_SELECTs cancel. */
3489 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3490 {
3491 rtx x = XVECEXP (trueop1, 0, i);
3492 if (!CONST_INT_P (x))
3493 return 0;
3494 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3495 if (!CONST_INT_P (y) || i != INTVAL (y))
3496 return 0;
3497 }
3498 return XEXP (trueop0, 0);
3499 }
3500
3501 return 0;
3502 case VEC_CONCAT:
3503 {
3504 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3505 ? GET_MODE (trueop0)
3506 : GET_MODE_INNER (mode));
3507 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3508 ? GET_MODE (trueop1)
3509 : GET_MODE_INNER (mode));
3510
3511 gcc_assert (VECTOR_MODE_P (mode));
3512 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3513 == GET_MODE_SIZE (mode));
3514
3515 if (VECTOR_MODE_P (op0_mode))
3516 gcc_assert (GET_MODE_INNER (mode)
3517 == GET_MODE_INNER (op0_mode));
3518 else
3519 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3520
3521 if (VECTOR_MODE_P (op1_mode))
3522 gcc_assert (GET_MODE_INNER (mode)
3523 == GET_MODE_INNER (op1_mode));
3524 else
3525 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3526
3527 if ((GET_CODE (trueop0) == CONST_VECTOR
3528 || CONST_SCALAR_INT_P (trueop0)
3529 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3530 && (GET_CODE (trueop1) == CONST_VECTOR
3531 || CONST_SCALAR_INT_P (trueop1)
3532 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3533 {
3534 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3535 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3536 rtvec v = rtvec_alloc (n_elts);
3537 unsigned int i;
3538 unsigned in_n_elts = 1;
3539
3540 if (VECTOR_MODE_P (op0_mode))
3541 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3542 for (i = 0; i < n_elts; i++)
3543 {
3544 if (i < in_n_elts)
3545 {
3546 if (!VECTOR_MODE_P (op0_mode))
3547 RTVEC_ELT (v, i) = trueop0;
3548 else
3549 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3550 }
3551 else
3552 {
3553 if (!VECTOR_MODE_P (op1_mode))
3554 RTVEC_ELT (v, i) = trueop1;
3555 else
3556 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3557 i - in_n_elts);
3558 }
3559 }
3560
3561 return gen_rtx_CONST_VECTOR (mode, v);
3562 }
3563
3564 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3565 Restrict the transformation to avoid generating a VEC_SELECT with a
3566 mode unrelated to its operand. */
3567 if (GET_CODE (trueop0) == VEC_SELECT
3568 && GET_CODE (trueop1) == VEC_SELECT
3569 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3570 && GET_MODE (XEXP (trueop0, 0)) == mode)
3571 {
3572 rtx par0 = XEXP (trueop0, 1);
3573 rtx par1 = XEXP (trueop1, 1);
3574 int len0 = XVECLEN (par0, 0);
3575 int len1 = XVECLEN (par1, 0);
3576 rtvec vec = rtvec_alloc (len0 + len1);
3577 for (int i = 0; i < len0; i++)
3578 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3579 for (int i = 0; i < len1; i++)
3580 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3581 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3582 gen_rtx_PARALLEL (VOIDmode, vec));
3583 }
3584 }
3585 return 0;
3586
3587 default:
3588 gcc_unreachable ();
3589 }
3590
3591 return 0;
3592 }
3593
3594 rtx
3595 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3596 rtx op0, rtx op1)
3597 {
3598 unsigned int width = GET_MODE_PRECISION (mode);
3599
3600 if (VECTOR_MODE_P (mode)
3601 && code != VEC_CONCAT
3602 && GET_CODE (op0) == CONST_VECTOR
3603 && GET_CODE (op1) == CONST_VECTOR)
3604 {
3605 unsigned n_elts = GET_MODE_NUNITS (mode);
3606 machine_mode op0mode = GET_MODE (op0);
3607 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3608 machine_mode op1mode = GET_MODE (op1);
3609 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3610 rtvec v = rtvec_alloc (n_elts);
3611 unsigned int i;
3612
3613 gcc_assert (op0_n_elts == n_elts);
3614 gcc_assert (op1_n_elts == n_elts);
3615 for (i = 0; i < n_elts; i++)
3616 {
3617 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3618 CONST_VECTOR_ELT (op0, i),
3619 CONST_VECTOR_ELT (op1, i));
3620 if (!x)
3621 return 0;
3622 RTVEC_ELT (v, i) = x;
3623 }
3624
3625 return gen_rtx_CONST_VECTOR (mode, v);
3626 }
3627
3628 if (VECTOR_MODE_P (mode)
3629 && code == VEC_CONCAT
3630 && (CONST_SCALAR_INT_P (op0)
3631 || GET_CODE (op0) == CONST_FIXED
3632 || CONST_DOUBLE_AS_FLOAT_P (op0))
3633 && (CONST_SCALAR_INT_P (op1)
3634 || CONST_DOUBLE_AS_FLOAT_P (op1)
3635 || GET_CODE (op1) == CONST_FIXED))
3636 {
3637 unsigned n_elts = GET_MODE_NUNITS (mode);
3638 rtvec v = rtvec_alloc (n_elts);
3639
3640 gcc_assert (n_elts >= 2);
3641 if (n_elts == 2)
3642 {
3643 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3644 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3645
3646 RTVEC_ELT (v, 0) = op0;
3647 RTVEC_ELT (v, 1) = op1;
3648 }
3649 else
3650 {
3651 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3652 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3653 unsigned i;
3654
3655 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3656 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3657 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3658
3659 for (i = 0; i < op0_n_elts; ++i)
3660 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3661 for (i = 0; i < op1_n_elts; ++i)
3662 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3663 }
3664
3665 return gen_rtx_CONST_VECTOR (mode, v);
3666 }
3667
3668 if (SCALAR_FLOAT_MODE_P (mode)
3669 && CONST_DOUBLE_AS_FLOAT_P (op0)
3670 && CONST_DOUBLE_AS_FLOAT_P (op1)
3671 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3672 {
3673 if (code == AND
3674 || code == IOR
3675 || code == XOR)
3676 {
3677 long tmp0[4];
3678 long tmp1[4];
3679 REAL_VALUE_TYPE r;
3680 int i;
3681
3682 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3683 GET_MODE (op0));
3684 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3685 GET_MODE (op1));
3686 for (i = 0; i < 4; i++)
3687 {
3688 switch (code)
3689 {
3690 case AND:
3691 tmp0[i] &= tmp1[i];
3692 break;
3693 case IOR:
3694 tmp0[i] |= tmp1[i];
3695 break;
3696 case XOR:
3697 tmp0[i] ^= tmp1[i];
3698 break;
3699 default:
3700 gcc_unreachable ();
3701 }
3702 }
3703 real_from_target (&r, tmp0, mode);
3704 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3705 }
3706 else
3707 {
3708 REAL_VALUE_TYPE f0, f1, value, result;
3709 bool inexact;
3710
3711 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3712 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3713 real_convert (&f0, mode, &f0);
3714 real_convert (&f1, mode, &f1);
3715
3716 if (HONOR_SNANS (mode)
3717 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3718 return 0;
3719
3720 if (code == DIV
3721 && REAL_VALUES_EQUAL (f1, dconst0)
3722 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3723 return 0;
3724
3725 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3726 && flag_trapping_math
3727 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3728 {
3729 int s0 = REAL_VALUE_NEGATIVE (f0);
3730 int s1 = REAL_VALUE_NEGATIVE (f1);
3731
3732 switch (code)
3733 {
3734 case PLUS:
3735 /* Inf + -Inf = NaN plus exception. */
3736 if (s0 != s1)
3737 return 0;
3738 break;
3739 case MINUS:
3740 /* Inf - Inf = NaN plus exception. */
3741 if (s0 == s1)
3742 return 0;
3743 break;
3744 case DIV:
3745 /* Inf / Inf = NaN plus exception. */
3746 return 0;
3747 default:
3748 break;
3749 }
3750 }
3751
3752 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3753 && flag_trapping_math
3754 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3755 || (REAL_VALUE_ISINF (f1)
3756 && REAL_VALUES_EQUAL (f0, dconst0))))
3757 /* Inf * 0 = NaN plus exception. */
3758 return 0;
3759
3760 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3761 &f0, &f1);
3762 real_convert (&result, mode, &value);
3763
3764 /* Don't constant fold this floating point operation if
3765 the result has overflowed and flag_trapping_math is set. */
3766
3767 if (flag_trapping_math
3768 && MODE_HAS_INFINITIES (mode)
3769 && REAL_VALUE_ISINF (result)
3770 && !REAL_VALUE_ISINF (f0)
3771 && !REAL_VALUE_ISINF (f1))
3772 /* Overflow plus exception. */
3773 return 0;
3774
3775 /* Don't constant fold this floating point operation if the
3776 result may depend upon the run-time rounding mode and
3777 flag_rounding_math is set, or if GCC's software emulation
3778 is unable to accurately represent the result. */
3779
3780 if ((flag_rounding_math
3781 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3782 && (inexact || !real_identical (&result, &value)))
3783 return NULL_RTX;
3784
3785 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3786 }
3787 }
3788
3789 /* We can fold some multi-word operations. */
3790 if ((GET_MODE_CLASS (mode) == MODE_INT
3791 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3792 && CONST_SCALAR_INT_P (op0)
3793 && CONST_SCALAR_INT_P (op1))
3794 {
3795 wide_int result;
3796 bool overflow;
3797 rtx_mode_t pop0 = std::make_pair (op0, mode);
3798 rtx_mode_t pop1 = std::make_pair (op1, mode);
3799
3800 #if TARGET_SUPPORTS_WIDE_INT == 0
3801 /* This assert keeps the simplification from producing a result
3802 that cannot be represented in a CONST_DOUBLE, but a lot of
3803 upstream callers expect that this function never fails to
3804 simplify something, so if you added this to the test
3805 above, the code would die later anyway. If this assert
3806 triggers, you just need to make the port support wide int. */
3807 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3808 #endif
3809 switch (code)
3810 {
3811 case MINUS:
3812 result = wi::sub (pop0, pop1);
3813 break;
3814
3815 case PLUS:
3816 result = wi::add (pop0, pop1);
3817 break;
3818
3819 case MULT:
3820 result = wi::mul (pop0, pop1);
3821 break;
3822
3823 case DIV:
3824 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3825 if (overflow)
3826 return NULL_RTX;
3827 break;
3828
3829 case MOD:
3830 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3831 if (overflow)
3832 return NULL_RTX;
3833 break;
3834
3835 case UDIV:
3836 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3837 if (overflow)
3838 return NULL_RTX;
3839 break;
3840
3841 case UMOD:
3842 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3843 if (overflow)
3844 return NULL_RTX;
3845 break;
3846
3847 case AND:
3848 result = wi::bit_and (pop0, pop1);
3849 break;
3850
3851 case IOR:
3852 result = wi::bit_or (pop0, pop1);
3853 break;
3854
3855 case XOR:
3856 result = wi::bit_xor (pop0, pop1);
3857 break;
3858
3859 case SMIN:
3860 result = wi::smin (pop0, pop1);
3861 break;
3862
3863 case SMAX:
3864 result = wi::smax (pop0, pop1);
3865 break;
3866
3867 case UMIN:
3868 result = wi::umin (pop0, pop1);
3869 break;
3870
3871 case UMAX:
3872 result = wi::umax (pop0, pop1);
3873 break;
3874
3875 case LSHIFTRT:
3876 case ASHIFTRT:
3877 case ASHIFT:
3878 {
3879 wide_int wop1 = pop1;
3880 if (SHIFT_COUNT_TRUNCATED)
3881 wop1 = wi::umod_trunc (wop1, width);
3882 else if (wi::geu_p (wop1, width))
3883 return NULL_RTX;
3884
3885 switch (code)
3886 {
3887 case LSHIFTRT:
3888 result = wi::lrshift (pop0, wop1);
3889 break;
3890
3891 case ASHIFTRT:
3892 result = wi::arshift (pop0, wop1);
3893 break;
3894
3895 case ASHIFT:
3896 result = wi::lshift (pop0, wop1);
3897 break;
3898
3899 default:
3900 gcc_unreachable ();
3901 }
3902 break;
3903 }
3904 case ROTATE:
3905 case ROTATERT:
3906 {
3907 if (wi::neg_p (pop1))
3908 return NULL_RTX;
3909
3910 switch (code)
3911 {
3912 case ROTATE:
3913 result = wi::lrotate (pop0, pop1);
3914 break;
3915
3916 case ROTATERT:
3917 result = wi::rrotate (pop0, pop1);
3918 break;
3919
3920 default:
3921 gcc_unreachable ();
3922 }
3923 break;
3924 }
3925 default:
3926 return NULL_RTX;
3927 }
3928 return immed_wide_int_const (result, mode);
3929 }
3930
3931 return NULL_RTX;
3932 }
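
/* Illustrative usage sketch (not part of the compiler proper; the helper
   name below is hypothetical).  Callers typically hand this routine two
   constant operands and fall back to emitting the operation when it
   returns NULL_RTX.  */
#if 0
static rtx
fold_constant_plus_example (void)
{
  /* Fold 2 + 3 in SImode; the wide-int path above returns (const_int 5).  */
  return simplify_const_binary_operation (PLUS, SImode, GEN_INT (2),
                                          GEN_INT (3));
}
#endif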
3933
3934
3935 \f
3936 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3937 PLUS or MINUS.
3938
3939 Rather than test for specific cases, we do this by a brute-force method
3940 and do all possible simplifications until no more changes occur. Then
3941 we rebuild the operation. */
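/* As an illustrative sketch: simplifying (minus (plus A B) (plus B C))
   first flattens the operands into the signed list { +A, +B, -B, -C };
   the pairwise combination loop then cancels +B against -B, and the
   operation is rebuilt, yielding (minus A C).  */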
3942
3943 struct simplify_plus_minus_op_data
3944 {
3945 rtx op;
3946 short neg;
3947 };
3948
3949 static bool
3950 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3951 {
3952 int result;
3953
3954 result = (commutative_operand_precedence (y)
3955 - commutative_operand_precedence (x));
3956 if (result)
3957 return result > 0;
3958
3959 /* Group together equal REGs to do more simplification. */
3960 if (REG_P (x) && REG_P (y))
3961 return REGNO (x) > REGNO (y);
3962 else
3963 return false;
3964 }
3965
3966 static rtx
3967 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
3968 rtx op1)
3969 {
3970 struct simplify_plus_minus_op_data ops[16];
3971 rtx result, tem;
3972 int n_ops = 2;
3973 int changed, n_constants, canonicalized = 0;
3974 int i, j;
3975
3976 memset (ops, 0, sizeof ops);
3977
3978 /* Set up the two operands and then expand them until nothing has been
3979 changed. If we run out of room in our array, give up; this should
3980 almost never happen. */
3981
3982 ops[0].op = op0;
3983 ops[0].neg = 0;
3984 ops[1].op = op1;
3985 ops[1].neg = (code == MINUS);
3986
3987 do
3988 {
3989 changed = 0;
3990 n_constants = 0;
3991
3992 for (i = 0; i < n_ops; i++)
3993 {
3994 rtx this_op = ops[i].op;
3995 int this_neg = ops[i].neg;
3996 enum rtx_code this_code = GET_CODE (this_op);
3997
3998 switch (this_code)
3999 {
4000 case PLUS:
4001 case MINUS:
4002 if (n_ops == ARRAY_SIZE (ops))
4003 return NULL_RTX;
4004
4005 ops[n_ops].op = XEXP (this_op, 1);
4006 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4007 n_ops++;
4008
4009 ops[i].op = XEXP (this_op, 0);
4010 changed = 1;
4011 canonicalized |= this_neg || i != n_ops - 2;
4012 break;
4013
4014 case NEG:
4015 ops[i].op = XEXP (this_op, 0);
4016 ops[i].neg = ! this_neg;
4017 changed = 1;
4018 canonicalized = 1;
4019 break;
4020
4021 case CONST:
4022 if (n_ops != ARRAY_SIZE (ops)
4023 && GET_CODE (XEXP (this_op, 0)) == PLUS
4024 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4025 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4026 {
4027 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4028 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4029 ops[n_ops].neg = this_neg;
4030 n_ops++;
4031 changed = 1;
4032 canonicalized = 1;
4033 }
4034 break;
4035
4036 case NOT:
4037 /* ~a -> (-a - 1) */
4038 if (n_ops != ARRAY_SIZE (ops))
4039 {
4040 ops[n_ops].op = CONSTM1_RTX (mode);
4041 ops[n_ops++].neg = this_neg;
4042 ops[i].op = XEXP (this_op, 0);
4043 ops[i].neg = !this_neg;
4044 changed = 1;
4045 canonicalized = 1;
4046 }
4047 break;
4048
4049 case CONST_INT:
4050 n_constants++;
4051 if (this_neg)
4052 {
4053 ops[i].op = neg_const_int (mode, this_op);
4054 ops[i].neg = 0;
4055 changed = 1;
4056 canonicalized = 1;
4057 }
4058 break;
4059
4060 default:
4061 break;
4062 }
4063 }
4064 }
4065 while (changed);
4066
4067 if (n_constants > 1)
4068 canonicalized = 1;
4069
4070 gcc_assert (n_ops >= 2);
4071
4072 /* If we only have two operands, we can avoid the loops. */
4073 if (n_ops == 2)
4074 {
4075 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4076 rtx lhs, rhs;
4077
4078 /* Get the two operands. Be careful with the order, especially for
4079 the cases where code == MINUS. */
4080 if (ops[0].neg && ops[1].neg)
4081 {
4082 lhs = gen_rtx_NEG (mode, ops[0].op);
4083 rhs = ops[1].op;
4084 }
4085 else if (ops[0].neg)
4086 {
4087 lhs = ops[1].op;
4088 rhs = ops[0].op;
4089 }
4090 else
4091 {
4092 lhs = ops[0].op;
4093 rhs = ops[1].op;
4094 }
4095
4096 return simplify_const_binary_operation (code, mode, lhs, rhs);
4097 }
4098
4099 /* Now simplify each pair of operands until nothing changes. */
4100 do
4101 {
4102 /* Insertion sort is good enough for a small array. */
4103 for (i = 1; i < n_ops; i++)
4104 {
4105 struct simplify_plus_minus_op_data save;
4106 j = i - 1;
4107 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4108 continue;
4109
4110 canonicalized = 1;
4111 save = ops[i];
4112 do
4113 ops[j + 1] = ops[j];
4114 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4115 ops[j + 1] = save;
4116 }
4117
4118 changed = 0;
4119 for (i = n_ops - 1; i > 0; i--)
4120 for (j = i - 1; j >= 0; j--)
4121 {
4122 rtx lhs = ops[j].op, rhs = ops[i].op;
4123 int lneg = ops[j].neg, rneg = ops[i].neg;
4124
4125 if (lhs != 0 && rhs != 0)
4126 {
4127 enum rtx_code ncode = PLUS;
4128
4129 if (lneg != rneg)
4130 {
4131 ncode = MINUS;
4132 if (lneg)
4133 tem = lhs, lhs = rhs, rhs = tem;
4134 }
4135 else if (swap_commutative_operands_p (lhs, rhs))
4136 tem = lhs, lhs = rhs, rhs = tem;
4137
4138 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4139 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4140 {
4141 rtx tem_lhs, tem_rhs;
4142
4143 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4144 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4145 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4146
4147 if (tem && !CONSTANT_P (tem))
4148 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4149 }
4150 else
4151 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4152
4153 if (tem)
4154 {
4155 /* Reject "simplifications" that just wrap the two
4156 arguments in a CONST. Failure to do so can result
4157 in infinite recursion with simplify_binary_operation
4158 when it calls us to simplify CONST operations.
4159 Also, if we find such a simplification, don't try
4160 any more combinations with this rhs: we must have
4161 something like symbol+offset, i.e. one of the
4162 trivial CONST expressions we handle later. */
4163 if (GET_CODE (tem) == CONST
4164 && GET_CODE (XEXP (tem, 0)) == ncode
4165 && XEXP (XEXP (tem, 0), 0) == lhs
4166 && XEXP (XEXP (tem, 0), 1) == rhs)
4167 break;
4168 lneg &= rneg;
4169 if (GET_CODE (tem) == NEG)
4170 tem = XEXP (tem, 0), lneg = !lneg;
4171 if (CONST_INT_P (tem) && lneg)
4172 tem = neg_const_int (mode, tem), lneg = 0;
4173
4174 ops[i].op = tem;
4175 ops[i].neg = lneg;
4176 ops[j].op = NULL_RTX;
4177 changed = 1;
4178 canonicalized = 1;
4179 }
4180 }
4181 }
4182
4183 /* If nothing changed, fail. */
4184 if (!canonicalized)
4185 return NULL_RTX;
4186
4187 /* Pack all the operands to the lower-numbered entries. */
4188 for (i = 0, j = 0; j < n_ops; j++)
4189 if (ops[j].op)
4190 {
4191 ops[i] = ops[j];
4192 i++;
4193 }
4194 n_ops = i;
4195 }
4196 while (changed);
4197
4198 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4199 if (n_ops == 2
4200 && CONST_INT_P (ops[1].op)
4201 && CONSTANT_P (ops[0].op)
4202 && ops[0].neg)
4203 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4204
4205 /* We suppressed creation of trivial CONST expressions in the
4206 combination loop to avoid recursion. Create one manually now.
4207 The combination loop should have ensured that there is exactly
4208 one CONST_INT, and the sort will have ensured that it is last
4209 in the array and that any other constant will be next-to-last. */
4210
4211 if (n_ops > 1
4212 && CONST_INT_P (ops[n_ops - 1].op)
4213 && CONSTANT_P (ops[n_ops - 2].op))
4214 {
4215 rtx value = ops[n_ops - 1].op;
4216 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4217 value = neg_const_int (mode, value);
4218 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4219 INTVAL (value));
4220 n_ops--;
4221 }
4222
4223 /* Put a non-negated operand first, if possible. */
4224
4225 for (i = 0; i < n_ops && ops[i].neg; i++)
4226 continue;
4227 if (i == n_ops)
4228 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4229 else if (i != 0)
4230 {
4231 tem = ops[0].op;
4232 ops[0] = ops[i];
4233 ops[i].op = tem;
4234 ops[i].neg = 1;
4235 }
4236
4237 /* Now make the result by performing the requested operations. */
4238 result = ops[0].op;
4239 for (i = 1; i < n_ops; i++)
4240 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4241 mode, result, ops[i].op);
4242
4243 return result;
4244 }
4245
4246 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4247 static bool
4248 plus_minus_operand_p (const_rtx x)
4249 {
4250 return GET_CODE (x) == PLUS
4251 || GET_CODE (x) == MINUS
4252 || (GET_CODE (x) == CONST
4253 && GET_CODE (XEXP (x, 0)) == PLUS
4254 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4255 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4256 }
4257
4258 /* Like simplify_binary_operation except used for relational operators.
4259 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4260 not also be VOIDmode.
4261
4262 CMP_MODE specifies the mode in which the comparison is done, so it is
4263 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4264 the operands or, if both are VOIDmode, the operands are compared in
4265 "infinite precision". */
4266 rtx
4267 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4268 machine_mode cmp_mode, rtx op0, rtx op1)
4269 {
4270 rtx tem, trueop0, trueop1;
4271
4272 if (cmp_mode == VOIDmode)
4273 cmp_mode = GET_MODE (op0);
4274 if (cmp_mode == VOIDmode)
4275 cmp_mode = GET_MODE (op1);
4276
4277 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4278 if (tem)
4279 {
4280 if (SCALAR_FLOAT_MODE_P (mode))
4281 {
4282 if (tem == const0_rtx)
4283 return CONST0_RTX (mode);
4284 #ifdef FLOAT_STORE_FLAG_VALUE
4285 {
4286 REAL_VALUE_TYPE val;
4287 val = FLOAT_STORE_FLAG_VALUE (mode);
4288 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4289 }
4290 #else
4291 return NULL_RTX;
4292 #endif
4293 }
4294 if (VECTOR_MODE_P (mode))
4295 {
4296 if (tem == const0_rtx)
4297 return CONST0_RTX (mode);
4298 #ifdef VECTOR_STORE_FLAG_VALUE
4299 {
4300 int i, units;
4301 rtvec v;
4302
4303 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4304 if (val == NULL_RTX)
4305 return NULL_RTX;
4306 if (val == const1_rtx)
4307 return CONST1_RTX (mode);
4308
4309 units = GET_MODE_NUNITS (mode);
4310 v = rtvec_alloc (units);
4311 for (i = 0; i < units; i++)
4312 RTVEC_ELT (v, i) = val;
4313 return gen_rtx_raw_CONST_VECTOR (mode, v);
4314 }
4315 #else
4316 return NULL_RTX;
4317 #endif
4318 }
4319
4320 return tem;
4321 }
4322
4323 /* For the following tests, ensure const0_rtx is op1. */
4324 if (swap_commutative_operands_p (op0, op1)
4325 || (op0 == const0_rtx && op1 != const0_rtx))
4326 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4327
4328 /* If op0 is a compare, extract the comparison arguments from it. */
4329 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4330 return simplify_gen_relational (code, mode, VOIDmode,
4331 XEXP (op0, 0), XEXP (op0, 1));
4332
4333 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4334 || CC0_P (op0))
4335 return NULL_RTX;
4336
4337 trueop0 = avoid_constant_pool_reference (op0);
4338 trueop1 = avoid_constant_pool_reference (op1);
4339 return simplify_relational_operation_1 (code, mode, cmp_mode,
4340 trueop0, trueop1);
4341 }
4342
4343 /* This part of simplify_relational_operation is only used when CMP_MODE
4344 is not in class MODE_CC (i.e. it is a real comparison).
4345
4346 MODE is the mode of the result, while CMP_MODE specifies the mode
4347 in which the comparison is done, so it is the mode of the operands. */
4348
4349 static rtx
4350 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4351 machine_mode cmp_mode, rtx op0, rtx op1)
4352 {
4353 enum rtx_code op0code = GET_CODE (op0);
4354
4355 if (op1 == const0_rtx && COMPARISON_P (op0))
4356 {
4357 /* If op0 is a comparison, extract the comparison arguments
4358 from it. */
4359 if (code == NE)
4360 {
4361 if (GET_MODE (op0) == mode)
4362 return simplify_rtx (op0);
4363 else
4364 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4365 XEXP (op0, 0), XEXP (op0, 1));
4366 }
4367 else if (code == EQ)
4368 {
4369 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4370 if (new_code != UNKNOWN)
4371 return simplify_gen_relational (new_code, mode, VOIDmode,
4372 XEXP (op0, 0), XEXP (op0, 1));
4373 }
4374 }
4375
4376 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4377 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
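/* Illustrative reasoning (sketch): in unsigned arithmetic (a + C) < C
   holds exactly when the addition wrapped, i.e. when a >= -C modulo the
   mode size.  E.g. in QImode, (plus a 1) <u 1 iff a == 255, which is
   exactly a >=u 255 (i.e. -1 in QImode).  */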
4378 if ((code == LTU || code == GEU)
4379 && GET_CODE (op0) == PLUS
4380 && CONST_INT_P (XEXP (op0, 1))
4381 && (rtx_equal_p (op1, XEXP (op0, 0))
4382 || rtx_equal_p (op1, XEXP (op0, 1)))
4383 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4384 && XEXP (op0, 1) != const0_rtx)
4385 {
4386 rtx new_cmp
4387 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4388 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4389 cmp_mode, XEXP (op0, 0), new_cmp);
4390 }
4391
4392 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4393 if ((code == LTU || code == GEU)
4394 && GET_CODE (op0) == PLUS
4395 && rtx_equal_p (op1, XEXP (op0, 1))
4396 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4397 && !rtx_equal_p (op1, XEXP (op0, 0)))
4398 return simplify_gen_relational (code, mode, cmp_mode, op0,
4399 copy_rtx (XEXP (op0, 0)));
4400
4401 if (op1 == const0_rtx)
4402 {
4403 /* Canonicalize (GTU x 0) as (NE x 0). */
4404 if (code == GTU)
4405 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4406 /* Canonicalize (LEU x 0) as (EQ x 0). */
4407 if (code == LEU)
4408 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4409 }
4410 else if (op1 == const1_rtx)
4411 {
4412 switch (code)
4413 {
4414 case GE:
4415 /* Canonicalize (GE x 1) as (GT x 0). */
4416 return simplify_gen_relational (GT, mode, cmp_mode,
4417 op0, const0_rtx);
4418 case GEU:
4419 /* Canonicalize (GEU x 1) as (NE x 0). */
4420 return simplify_gen_relational (NE, mode, cmp_mode,
4421 op0, const0_rtx);
4422 case LT:
4423 /* Canonicalize (LT x 1) as (LE x 0). */
4424 return simplify_gen_relational (LE, mode, cmp_mode,
4425 op0, const0_rtx);
4426 case LTU:
4427 /* Canonicalize (LTU x 1) as (EQ x 0). */
4428 return simplify_gen_relational (EQ, mode, cmp_mode,
4429 op0, const0_rtx);
4430 default:
4431 break;
4432 }
4433 }
4434 else if (op1 == constm1_rtx)
4435 {
4436 /* Canonicalize (LE x -1) as (LT x 0). */
4437 if (code == LE)
4438 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4439 /* Canonicalize (GT x -1) as (GE x 0). */
4440 if (code == GT)
4441 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4442 }
4443
4444 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4445 if ((code == EQ || code == NE)
4446 && (op0code == PLUS || op0code == MINUS)
4447 && CONSTANT_P (op1)
4448 && CONSTANT_P (XEXP (op0, 1))
4449 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4450 {
4451 rtx x = XEXP (op0, 0);
4452 rtx c = XEXP (op0, 1);
4453 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4454 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4455
4456 /* Detect an infinitely recursive condition, where we oscillate at this
4457 simplification case between:
4458 A + B == C <---> C - B == A,
4459 where A, B, and C are all constants with non-simplifiable expressions,
4460 usually SYMBOL_REFs. */
4461 if (GET_CODE (tem) == invcode
4462 && CONSTANT_P (x)
4463 && rtx_equal_p (c, XEXP (tem, 1)))
4464 return NULL_RTX;
4465
4466 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4467 }
4468
4469 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4470 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4471 if (code == NE
4472 && op1 == const0_rtx
4473 && GET_MODE_CLASS (mode) == MODE_INT
4474 && cmp_mode != VOIDmode
4475 /* ??? Work-around BImode bugs in the ia64 backend. */
4476 && mode != BImode
4477 && cmp_mode != BImode
4478 && nonzero_bits (op0, cmp_mode) == 1
4479 && STORE_FLAG_VALUE == 1)
4480 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4481 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4482 : lowpart_subreg (mode, op0, cmp_mode);
4483
4484 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4485 if ((code == EQ || code == NE)
4486 && op1 == const0_rtx
4487 && op0code == XOR)
4488 return simplify_gen_relational (code, mode, cmp_mode,
4489 XEXP (op0, 0), XEXP (op0, 1));
4490
4491 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4492 if ((code == EQ || code == NE)
4493 && op0code == XOR
4494 && rtx_equal_p (XEXP (op0, 0), op1)
4495 && !side_effects_p (XEXP (op0, 0)))
4496 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4497 CONST0_RTX (mode));
4498
4499 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4500 if ((code == EQ || code == NE)
4501 && op0code == XOR
4502 && rtx_equal_p (XEXP (op0, 1), op1)
4503 && !side_effects_p (XEXP (op0, 1)))
4504 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4505 CONST0_RTX (mode));
4506
4507 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4508 if ((code == EQ || code == NE)
4509 && op0code == XOR
4510 && CONST_SCALAR_INT_P (op1)
4511 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4512 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4513 simplify_gen_binary (XOR, cmp_mode,
4514 XEXP (op0, 1), op1));
4515
4516 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4517 if ((code == EQ || code == NE)
4518 && GET_CODE (op0) == BSWAP
4519 && CONST_SCALAR_INT_P (op1))
4520 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4521 simplify_gen_unary (BSWAP, cmp_mode,
4522 op1, cmp_mode));
4523
4524 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4525 if ((code == EQ || code == NE)
4526 && GET_CODE (op0) == BSWAP
4527 && GET_CODE (op1) == BSWAP)
4528 return simplify_gen_relational (code, mode, cmp_mode,
4529 XEXP (op0, 0), XEXP (op1, 0));
4530
4531 if (op0code == POPCOUNT && op1 == const0_rtx)
4532 switch (code)
4533 {
4534 case EQ:
4535 case LE:
4536 case LEU:
4537 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4538 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4539 XEXP (op0, 0), const0_rtx);
4540
4541 case NE:
4542 case GT:
4543 case GTU:
4544 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4545 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4546 XEXP (op0, 0), const0_rtx);
4547
4548 default:
4549 break;
4550 }
4551
4552 return NULL_RTX;
4553 }
4554
4555 enum
4556 {
4557 CMP_EQ = 1,
4558 CMP_LT = 2,
4559 CMP_GT = 4,
4560 CMP_LTU = 8,
4561 CMP_GTU = 16
4562 };
4563
4564
4565 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4566 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4567 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4568 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4569 For floating-point comparisons, assume that the operands were ordered. */
4570
4571 static rtx
4572 comparison_result (enum rtx_code code, int known_results)
4573 {
4574 switch (code)
4575 {
4576 case EQ:
4577 case UNEQ:
4578 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4579 case NE:
4580 case LTGT:
4581 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4582
4583 case LT:
4584 case UNLT:
4585 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4586 case GE:
4587 case UNGE:
4588 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4589
4590 case GT:
4591 case UNGT:
4592 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4593 case LE:
4594 case UNLE:
4595 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4596
4597 case LTU:
4598 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4599 case GEU:
4600 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4601
4602 case GTU:
4603 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4604 case LEU:
4605 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4606
4607 case ORDERED:
4608 return const_true_rtx;
4609 case UNORDERED:
4610 return const0_rtx;
4611 default:
4612 gcc_unreachable ();
4613 }
4614 }
4615
4616 /* Check if the given comparison (done in the given MODE) is actually
4617 a tautology or a contradiction. If the mode is VOIDmode, the
4618 comparison is done in "infinite precision". If no simplification
4619 is possible, this function returns zero. Otherwise, it returns
4620 either const_true_rtx or const0_rtx. */
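/* For example (illustrative): with CODE == EQ, MODE == SImode and both
   operands (const_int 4), the equal-operands test below yields
   comparison_result (EQ, CMP_EQ), i.e. const_true_rtx; comparing
   (const_int 4) against (const_int 5) instead yields const0_rtx.  */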
4621
4622 rtx
4623 simplify_const_relational_operation (enum rtx_code code,
4624 machine_mode mode,
4625 rtx op0, rtx op1)
4626 {
4627 rtx tem;
4628 rtx trueop0;
4629 rtx trueop1;
4630
4631 gcc_assert (mode != VOIDmode
4632 || (GET_MODE (op0) == VOIDmode
4633 && GET_MODE (op1) == VOIDmode));
4634
4635 /* If op0 is a compare, extract the comparison arguments from it. */
4636 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4637 {
4638 op1 = XEXP (op0, 1);
4639 op0 = XEXP (op0, 0);
4640
4641 if (GET_MODE (op0) != VOIDmode)
4642 mode = GET_MODE (op0);
4643 else if (GET_MODE (op1) != VOIDmode)
4644 mode = GET_MODE (op1);
4645 else
4646 return 0;
4647 }
4648
4649 /* We can't simplify MODE_CC values since we don't know what the
4650 actual comparison is. */
4651 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4652 return 0;
4653
4654 /* Make sure the constant is second. */
4655 if (swap_commutative_operands_p (op0, op1))
4656 {
4657 tem = op0, op0 = op1, op1 = tem;
4658 code = swap_condition (code);
4659 }
4660
4661 trueop0 = avoid_constant_pool_reference (op0);
4662 trueop1 = avoid_constant_pool_reference (op1);
4663
4664 /* For integer comparisons of A and B we may be able to simplify A - B and
4665 then simplify a comparison of that with zero. If A and B are both either
4666 a register or a CONST_INT, this can't help; testing for these cases will
4667 prevent infinite recursion here and speed things up.
4668
4669 We can only do this for EQ and NE comparisons as otherwise we may
4670 lose or introduce overflow which we cannot disregard as undefined, since
4671 we do not know the signedness of the operation on either the left or
4672 the right hand side of the comparison. */
4673
4674 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4675 && (code == EQ || code == NE)
4676 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4677 && (REG_P (op1) || CONST_INT_P (trueop1)))
4678 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4679 /* We cannot do this if tem is a nonzero address. */
4680 && ! nonzero_address_p (tem))
4681 return simplify_const_relational_operation (signed_condition (code),
4682 mode, tem, const0_rtx);
4683
4684 if (! HONOR_NANS (mode) && code == ORDERED)
4685 return const_true_rtx;
4686
4687 if (! HONOR_NANS (mode) && code == UNORDERED)
4688 return const0_rtx;
4689
4690 /* For modes without NaNs, if the two operands are equal, we know the
4691 result except if they have side-effects. Even with NaNs we know
4692 the result of unordered comparisons and, if signaling NaNs are
4693 irrelevant, also the result of LT/GT/LTGT. */
4694 if ((! HONOR_NANS (GET_MODE (trueop0))
4695 || code == UNEQ || code == UNLE || code == UNGE
4696 || ((code == LT || code == GT || code == LTGT)
4697 && ! HONOR_SNANS (GET_MODE (trueop0))))
4698 && rtx_equal_p (trueop0, trueop1)
4699 && ! side_effects_p (trueop0))
4700 return comparison_result (code, CMP_EQ);
4701
4702 /* If the operands are floating-point constants, see if we can fold
4703 the result. */
4704 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4705 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4706 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4707 {
4708 REAL_VALUE_TYPE d0, d1;
4709
4710 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4711 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4712
4713 /* Comparisons are unordered iff at least one of the values is NaN. */
4714 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4715 switch (code)
4716 {
4717 case UNEQ:
4718 case UNLT:
4719 case UNGT:
4720 case UNLE:
4721 case UNGE:
4722 case NE:
4723 case UNORDERED:
4724 return const_true_rtx;
4725 case EQ:
4726 case LT:
4727 case GT:
4728 case LE:
4729 case GE:
4730 case LTGT:
4731 case ORDERED:
4732 return const0_rtx;
4733 default:
4734 return 0;
4735 }
4736
4737 return comparison_result (code,
4738 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4739 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4740 }
4741
4742 /* Otherwise, see if the operands are both integers. */
4743 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4744 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4745 {
4746 /* It would be nice if we really had a mode here. However, the
4747 largest int representable on the target is as good as
4748 infinite. */
4749 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4750 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4751 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4752
4753 if (wi::eq_p (ptrueop0, ptrueop1))
4754 return comparison_result (code, CMP_EQ);
4755 else
4756 {
4757 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4758 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4759 return comparison_result (code, cr);
4760 }
4761 }
4762
4763 /* Optimize comparisons with upper and lower bounds. */
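/* For example (illustrative): if nonzero_bits shows that op0 fits in
   eight bits, then mmax is 255 and (ltu op0 (const_int 256)) folds to
   const_true_rtx below, while (gtu op0 (const_int 255)) folds to
   const0_rtx.  */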
4764 if (HWI_COMPUTABLE_MODE_P (mode)
4765 && CONST_INT_P (trueop1))
4766 {
4767 int sign;
4768 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4769 HOST_WIDE_INT val = INTVAL (trueop1);
4770 HOST_WIDE_INT mmin, mmax;
4771
4772 if (code == GEU
4773 || code == LEU
4774 || code == GTU
4775 || code == LTU)
4776 sign = 0;
4777 else
4778 sign = 1;
4779
4780 /* Get a reduced range if the sign bit is zero. */
4781 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4782 {
4783 mmin = 0;
4784 mmax = nonzero;
4785 }
4786 else
4787 {
4788 rtx mmin_rtx, mmax_rtx;
4789 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4790
4791 mmin = INTVAL (mmin_rtx);
4792 mmax = INTVAL (mmax_rtx);
4793 if (sign)
4794 {
4795 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4796
4797 mmin >>= (sign_copies - 1);
4798 mmax >>= (sign_copies - 1);
4799 }
4800 }
4801
4802 switch (code)
4803 {
4804 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4805 case GEU:
4806 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4807 return const_true_rtx;
4808 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4809 return const0_rtx;
4810 break;
4811 case GE:
4812 if (val <= mmin)
4813 return const_true_rtx;
4814 if (val > mmax)
4815 return const0_rtx;
4816 break;
4817
4818 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4819 case LEU:
4820 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4821 return const_true_rtx;
4822 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4823 return const0_rtx;
4824 break;
4825 case LE:
4826 if (val >= mmax)
4827 return const_true_rtx;
4828 if (val < mmin)
4829 return const0_rtx;
4830 break;
4831
4832 case EQ:
4833 /* x == y is always false for y out of range. */
4834 if (val < mmin || val > mmax)
4835 return const0_rtx;
4836 break;
4837
4838 /* x > y is always false for y >= mmax, always true for y < mmin. */
4839 case GTU:
4840 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4841 return const0_rtx;
4842 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4843 return const_true_rtx;
4844 break;
4845 case GT:
4846 if (val >= mmax)
4847 return const0_rtx;
4848 if (val < mmin)
4849 return const_true_rtx;
4850 break;
4851
4852 /* x < y is always false for y <= mmin, always true for y > mmax. */
4853 case LTU:
4854 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4855 return const0_rtx;
4856 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4857 return const_true_rtx;
4858 break;
4859 case LT:
4860 if (val <= mmin)
4861 return const0_rtx;
4862 if (val > mmax)
4863 return const_true_rtx;
4864 break;
4865
4866 case NE:
4867 /* x != y is always true for y out of range. */
4868 if (val < mmin || val > mmax)
4869 return const_true_rtx;
4870 break;
4871
4872 default:
4873 break;
4874 }
4875 }
4876
4877 /* Optimize integer comparisons with zero. */
4878 if (trueop1 == const0_rtx)
4879 {
4880 /* Some addresses are known to be nonzero. We don't know
4881 their sign, but equality comparisons are known. */
4882 if (nonzero_address_p (trueop0))
4883 {
4884 if (code == EQ || code == LEU)
4885 return const0_rtx;
4886 if (code == NE || code == GTU)
4887 return const_true_rtx;
4888 }
4889
4890 /* See if the first operand is an IOR with a constant. If so, we
4891 may be able to determine the result of this comparison. */
4892 if (GET_CODE (op0) == IOR)
4893 {
4894 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4895 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4896 {
4897 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4898 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4899 && (UINTVAL (inner_const)
4900 & ((unsigned HOST_WIDE_INT) 1
4901 << sign_bitnum)));
4902
4903 switch (code)
4904 {
4905 case EQ:
4906 case LEU:
4907 return const0_rtx;
4908 case NE:
4909 case GTU:
4910 return const_true_rtx;
4911 case LT:
4912 case LE:
4913 if (has_sign)
4914 return const_true_rtx;
4915 break;
4916 case GT:
4917 case GE:
4918 if (has_sign)
4919 return const0_rtx;
4920 break;
4921 default:
4922 break;
4923 }
4924 }
4925 }
4926 }
4927
4928 /* Optimize comparison of ABS with zero. */
4929 if (trueop1 == CONST0_RTX (mode)
4930 && (GET_CODE (trueop0) == ABS
4931 || (GET_CODE (trueop0) == FLOAT_EXTEND
4932 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4933 {
4934 switch (code)
4935 {
4936 case LT:
4937 /* Optimize abs(x) < 0.0. */
4938 if (!HONOR_SNANS (mode)
4939 && (!INTEGRAL_MODE_P (mode)
4940 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4941 {
4942 if (INTEGRAL_MODE_P (mode)
4943 && (issue_strict_overflow_warning
4944 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4945 warning (OPT_Wstrict_overflow,
4946 ("assuming signed overflow does not occur when "
4947 "assuming abs (x) < 0 is false"));
4948 return const0_rtx;
4949 }
4950 break;
4951
4952 case GE:
4953 /* Optimize abs(x) >= 0.0. */
4954 if (!HONOR_NANS (mode)
4955 && (!INTEGRAL_MODE_P (mode)
4956 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4957 {
4958 if (INTEGRAL_MODE_P (mode)
4959 && (issue_strict_overflow_warning
4960 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4961 warning (OPT_Wstrict_overflow,
4962 ("assuming signed overflow does not occur when "
4963 "assuming abs (x) >= 0 is true"));
4964 return const_true_rtx;
4965 }
4966 break;
4967
4968 case UNGE:
4969 /* Optimize ! (abs(x) < 0.0). */
4970 return const_true_rtx;
4971
4972 default:
4973 break;
4974 }
4975 }
4976
4977 return 0;
4978 }
4979 \f
4980 /* Simplify CODE, an operation with result mode MODE and three operands,
4981 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4982 a constant. Return 0 if no simplification is possible. */
4983
4984 rtx
4985 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
4986 machine_mode op0_mode, rtx op0, rtx op1,
4987 rtx op2)
4988 {
4989 unsigned int width = GET_MODE_PRECISION (mode);
4990 bool any_change = false;
4991 rtx tem, trueop2;
4992
4993 /* VOIDmode means "infinite" precision. */
4994 if (width == 0)
4995 width = HOST_BITS_PER_WIDE_INT;
4996
4997 switch (code)
4998 {
4999 case FMA:
5000 /* Simplify negations around the multiplication. */
5001 /* -a * -b + c => a * b + c. */
5002 if (GET_CODE (op0) == NEG)
5003 {
5004 tem = simplify_unary_operation (NEG, mode, op1, mode);
5005 if (tem)
5006 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5007 }
5008 else if (GET_CODE (op1) == NEG)
5009 {
5010 tem = simplify_unary_operation (NEG, mode, op0, mode);
5011 if (tem)
5012 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5013 }
5014
5015 /* Canonicalize the two multiplication operands. */
5016 /* a * -b + c => -b * a + c. */
5017 if (swap_commutative_operands_p (op0, op1))
5018 tem = op0, op0 = op1, op1 = tem, any_change = true;
5019
5020 if (any_change)
5021 return gen_rtx_FMA (mode, op0, op1, op2);
5022 return NULL_RTX;
5023
5024 case SIGN_EXTRACT:
5025 case ZERO_EXTRACT:
5026 if (CONST_INT_P (op0)
5027 && CONST_INT_P (op1)
5028 && CONST_INT_P (op2)
5029 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5030 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5031 {
5032 /* Extracting a bit-field from a constant. */
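/* For example (illustrative, assuming !BITS_BIG_ENDIAN):
   (zero_extract (const_int 0xab) (const_int 4) (const_int 4)) extracts
   the upper nibble and yields (const_int 0xa); SIGN_EXTRACT would
   sign-extend it to (const_int -6).  */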
5033 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5034 HOST_WIDE_INT op1val = INTVAL (op1);
5035 HOST_WIDE_INT op2val = INTVAL (op2);
5036 if (BITS_BIG_ENDIAN)
5037 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5038 else
5039 val >>= op2val;
5040
5041 if (HOST_BITS_PER_WIDE_INT != op1val)
5042 {
5043 /* First zero-extend. */
5044 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5045 /* If desired, propagate sign bit. */
5046 if (code == SIGN_EXTRACT
5047 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5048 != 0)
5049 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5050 }
5051
5052 return gen_int_mode (val, mode);
5053 }
5054 break;
5055
5056 case IF_THEN_ELSE:
5057 if (CONST_INT_P (op0))
5058 return op0 != const0_rtx ? op1 : op2;
5059
5060 /* Convert c ? a : a into "a". */
5061 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5062 return op1;
5063
5064 /* Convert a != b ? a : b into "a". */
5065 if (GET_CODE (op0) == NE
5066 && ! side_effects_p (op0)
5067 && ! HONOR_NANS (mode)
5068 && ! HONOR_SIGNED_ZEROS (mode)
5069 && ((rtx_equal_p (XEXP (op0, 0), op1)
5070 && rtx_equal_p (XEXP (op0, 1), op2))
5071 || (rtx_equal_p (XEXP (op0, 0), op2)
5072 && rtx_equal_p (XEXP (op0, 1), op1))))
5073 return op1;
5074
5075 /* Convert a == b ? a : b into "b". */
5076 if (GET_CODE (op0) == EQ
5077 && ! side_effects_p (op0)
5078 && ! HONOR_NANS (mode)
5079 && ! HONOR_SIGNED_ZEROS (mode)
5080 && ((rtx_equal_p (XEXP (op0, 0), op1)
5081 && rtx_equal_p (XEXP (op0, 1), op2))
5082 || (rtx_equal_p (XEXP (op0, 0), op2)
5083 && rtx_equal_p (XEXP (op0, 1), op1))))
5084 return op2;
5085
5086 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5087 {
5088 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5089 ? GET_MODE (XEXP (op0, 1))
5090 : GET_MODE (XEXP (op0, 0)));
5091 rtx temp;
5092
5093 /* Look for happy constants in op1 and op2. */
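/* For instance (sketch, assuming STORE_FLAG_VALUE == 1):
   (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to the
   comparison (lt x y) itself, and with the two constants swapped it
   becomes the reversed comparison (ge x y).  */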
5094 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5095 {
5096 HOST_WIDE_INT t = INTVAL (op1);
5097 HOST_WIDE_INT f = INTVAL (op2);
5098
5099 if (t == STORE_FLAG_VALUE && f == 0)
5100 code = GET_CODE (op0);
5101 else if (t == 0 && f == STORE_FLAG_VALUE)
5102 {
5103 enum rtx_code tmp;
5104 tmp = reversed_comparison_code (op0, NULL_RTX);
5105 if (tmp == UNKNOWN)
5106 break;
5107 code = tmp;
5108 }
5109 else
5110 break;
5111
5112 return simplify_gen_relational (code, mode, cmp_mode,
5113 XEXP (op0, 0), XEXP (op0, 1));
5114 }
5115
5116 if (cmp_mode == VOIDmode)
5117 cmp_mode = op0_mode;
5118 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5119 cmp_mode, XEXP (op0, 0),
5120 XEXP (op0, 1));
5121
5122 /* See if any simplifications were possible. */
5123 if (temp)
5124 {
5125 if (CONST_INT_P (temp))
5126 return temp == const0_rtx ? op2 : op1;
5127 else if (temp)
5128 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5129 }
5130 }
5131 break;
5132
5133 case VEC_MERGE:
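/* Illustrative note (sketch): for (vec_merge:V4SI A B (const_int 5))
   the selector bits 0 and 2 are set, so elements 0 and 2 come from A
   and elements 1 and 3 come from B; a selector of 0 therefore yields B
   and an all-ones selector yields A, as handled below.  */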
5134 gcc_assert (GET_MODE (op0) == mode);
5135 gcc_assert (GET_MODE (op1) == mode);
5136 gcc_assert (VECTOR_MODE_P (mode));
5137 trueop2 = avoid_constant_pool_reference (op2);
5138 if (CONST_INT_P (trueop2))
5139 {
5140 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5141 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5142 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5143 unsigned HOST_WIDE_INT mask;
5144 if (n_elts == HOST_BITS_PER_WIDE_INT)
5145 mask = -1;
5146 else
5147 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5148
5149 if (!(sel & mask) && !side_effects_p (op0))
5150 return op1;
5151 if ((sel & mask) == mask && !side_effects_p (op1))
5152 return op0;
5153
5154 rtx trueop0 = avoid_constant_pool_reference (op0);
5155 rtx trueop1 = avoid_constant_pool_reference (op1);
5156 if (GET_CODE (trueop0) == CONST_VECTOR
5157 && GET_CODE (trueop1) == CONST_VECTOR)
5158 {
5159 rtvec v = rtvec_alloc (n_elts);
5160 unsigned int i;
5161
5162 for (i = 0; i < n_elts; i++)
5163 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5164 ? CONST_VECTOR_ELT (trueop0, i)
5165 : CONST_VECTOR_ELT (trueop1, i));
5166 return gen_rtx_CONST_VECTOR (mode, v);
5167 }
5168
5169 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5170 if no element from a appears in the result. */
5171 if (GET_CODE (op0) == VEC_MERGE)
5172 {
5173 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5174 if (CONST_INT_P (tem))
5175 {
5176 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5177 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5178 return simplify_gen_ternary (code, mode, mode,
5179 XEXP (op0, 1), op1, op2);
5180 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5181 return simplify_gen_ternary (code, mode, mode,
5182 XEXP (op0, 0), op1, op2);
5183 }
5184 }
5185 if (GET_CODE (op1) == VEC_MERGE)
5186 {
5187 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5188 if (CONST_INT_P (tem))
5189 {
5190 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5191 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5192 return simplify_gen_ternary (code, mode, mode,
5193 op0, XEXP (op1, 1), op2);
5194 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5195 return simplify_gen_ternary (code, mode, mode,
5196 op0, XEXP (op1, 0), op2);
5197 }
5198 }
5199 }
5200
5201 if (rtx_equal_p (op0, op1)
5202 && !side_effects_p (op2) && !side_effects_p (op1))
5203 return op0;
5204
5205 break;
5206
5207 default:
5208 gcc_unreachable ();
5209 }
5210
5211 return 0;
5212 }
5213
5214 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5215 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5216 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5217
5218 Works by unpacking OP into a collection of 8-bit values
5219 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5220 and then repacking them again for OUTERMODE. */
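/* As an illustrative example (little-endian target assumed):
   (subreg:HI (const_int 0x12345678) 0) unpacks the SImode constant into
   the bytes { 78, 56, 34, 12 }, selects the two bytes at offset 0, and
   repacks them into (const_int 0x5678).  */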
5221
5222 static rtx
5223 simplify_immed_subreg (machine_mode outermode, rtx op,
5224 machine_mode innermode, unsigned int byte)
5225 {
5226 enum {
5227 value_bit = 8,
5228 value_mask = (1 << value_bit) - 1
5229 };
5230 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5231 int value_start;
5232 int i;
5233 int elem;
5234
5235 int num_elem;
5236 rtx * elems;
5237 int elem_bitsize;
5238 rtx result_s;
5239 rtvec result_v = NULL;
5240 enum mode_class outer_class;
5241 machine_mode outer_submode;
5242 int max_bitsize;
5243
5244 /* Some ports misuse CCmode. */
5245 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5246 return op;
5247
5248 /* We have no way to represent a complex constant at the rtl level. */
5249 if (COMPLEX_MODE_P (outermode))
5250 return NULL_RTX;
5251
5252 /* We support any size mode. */
5253 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5254 GET_MODE_BITSIZE (innermode));
5255
5256 /* Unpack the value. */
5257
5258 if (GET_CODE (op) == CONST_VECTOR)
5259 {
5260 num_elem = CONST_VECTOR_NUNITS (op);
5261 elems = &CONST_VECTOR_ELT (op, 0);
5262 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5263 }
5264 else
5265 {
5266 num_elem = 1;
5267 elems = &op;
5268 elem_bitsize = max_bitsize;
5269 }
5270 /* If this asserts, it is too complicated; reducing value_bit may help. */
5271 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5272 /* I don't know how to handle endianness of sub-units. */
5273 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5274
5275 for (elem = 0; elem < num_elem; elem++)
5276 {
5277 unsigned char * vp;
5278 rtx el = elems[elem];
5279
5280 /* Vectors are kept in target memory order. (This is probably
5281 a mistake.) */
5282 {
5283 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5284 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5285 / BITS_PER_UNIT);
5286 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5287 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5288 unsigned bytele = (subword_byte % UNITS_PER_WORD
5289 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5290 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5291 }
5292
5293 switch (GET_CODE (el))
5294 {
5295 case CONST_INT:
5296 for (i = 0;
5297 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5298 i += value_bit)
5299 *vp++ = INTVAL (el) >> i;
5300 /* CONST_INTs are always logically sign-extended. */
5301 for (; i < elem_bitsize; i += value_bit)
5302 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5303 break;
5304
5305 case CONST_WIDE_INT:
5306 {
5307 rtx_mode_t val = std::make_pair (el, innermode);
5308 unsigned char extend = wi::sign_mask (val);
5309
5310 for (i = 0; i < elem_bitsize; i += value_bit)
5311 *vp++ = wi::extract_uhwi (val, i, value_bit);
5312 for (; i < elem_bitsize; i += value_bit)
5313 *vp++ = extend;
5314 }
5315 break;
5316
5317 case CONST_DOUBLE:
5318 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5319 {
5320 unsigned char extend = 0;
5321 /* If this triggers, someone should have generated a
5322 CONST_INT instead. */
5323 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5324
5325 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5326 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5327 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5328 {
5329 *vp++
5330 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5331 i += value_bit;
5332 }
5333
5334 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5335 extend = -1;
5336 for (; i < elem_bitsize; i += value_bit)
5337 *vp++ = extend;
5338 }
5339 else
5340 {
5341 /* This is big enough for anything on the platform. */
5342 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5343 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5344
5345 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5346 gcc_assert (bitsize <= elem_bitsize);
5347 gcc_assert (bitsize % value_bit == 0);
5348
5349 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5350 GET_MODE (el));
5351
5352 /* real_to_target produces its result in words affected by
5353 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5354 and use WORDS_BIG_ENDIAN instead; see the documentation
5355 of SUBREG in rtl.texi. */
5356 for (i = 0; i < bitsize; i += value_bit)
5357 {
5358 int ibase;
5359 if (WORDS_BIG_ENDIAN)
5360 ibase = bitsize - 1 - i;
5361 else
5362 ibase = i;
5363 *vp++ = tmp[ibase / 32] >> i % 32;
5364 }
5365
5366 /* It shouldn't matter what's done here, so fill it with
5367 zero. */
5368 for (; i < elem_bitsize; i += value_bit)
5369 *vp++ = 0;
5370 }
5371 break;
5372
5373 case CONST_FIXED:
5374 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5375 {
5376 for (i = 0; i < elem_bitsize; i += value_bit)
5377 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5378 }
5379 else
5380 {
5381 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5382 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5383 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5384 i += value_bit)
5385 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5386 >> (i - HOST_BITS_PER_WIDE_INT);
5387 for (; i < elem_bitsize; i += value_bit)
5388 *vp++ = 0;
5389 }
5390 break;
5391
5392 default:
5393 gcc_unreachable ();
5394 }
5395 }
5396
5397 /* Now, pick the right byte to start with. */
5398 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5399 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5400 will already have offset 0. */
5401 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5402 {
5403 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5404 - byte);
5405 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5406 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5407 byte = (subword_byte % UNITS_PER_WORD
5408 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5409 }
5410
5411 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5412 so if it's become negative it will instead be very large.) */
5413 gcc_assert (byte < GET_MODE_SIZE (innermode));
5414
5415 /* Convert from bytes to chunks of size value_bit. */
5416 value_start = byte * (BITS_PER_UNIT / value_bit);
5417
5418 /* Re-pack the value. */
5419
5420 if (VECTOR_MODE_P (outermode))
5421 {
5422 num_elem = GET_MODE_NUNITS (outermode);
5423 result_v = rtvec_alloc (num_elem);
5424 elems = &RTVEC_ELT (result_v, 0);
5425 outer_submode = GET_MODE_INNER (outermode);
5426 }
5427 else
5428 {
5429 num_elem = 1;
5430 elems = &result_s;
5431 outer_submode = outermode;
5432 }
5433
5434 outer_class = GET_MODE_CLASS (outer_submode);
5435 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5436
5437 gcc_assert (elem_bitsize % value_bit == 0);
5438 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5439
5440 for (elem = 0; elem < num_elem; elem++)
5441 {
5442 unsigned char *vp;
5443
5444 /* Vectors are stored in target memory order. (This is probably
5445 a mistake.) */
5446 {
5447 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5448 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5449 / BITS_PER_UNIT);
5450 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5451 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5452 unsigned bytele = (subword_byte % UNITS_PER_WORD
5453 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5454 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5455 }
5456
5457 switch (outer_class)
5458 {
5459 case MODE_INT:
5460 case MODE_PARTIAL_INT:
5461 {
5462 int u;
5463 int base = 0;
5464 int units
5465 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5466 / HOST_BITS_PER_WIDE_INT;
5467 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5468 wide_int r;
5469
5470 for (u = 0; u < units; u++)
5471 {
5472 unsigned HOST_WIDE_INT buf = 0;
5473 for (i = 0;
5474 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5475 i += value_bit)
5476 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5477
5478 tmp[u] = buf;
5479 base += HOST_BITS_PER_WIDE_INT;
5480 }
5481 gcc_assert (GET_MODE_PRECISION (outer_submode)
5482 <= MAX_BITSIZE_MODE_ANY_INT);
5483 r = wide_int::from_array (tmp, units,
5484 GET_MODE_PRECISION (outer_submode));
5485 elems[elem] = immed_wide_int_const (r, outer_submode);
5486 }
5487 break;
5488
5489 case MODE_FLOAT:
5490 case MODE_DECIMAL_FLOAT:
5491 {
5492 REAL_VALUE_TYPE r;
5493 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5494
5495 /* real_from_target wants its input in words affected by
5496 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5497 and use WORDS_BIG_ENDIAN instead; see the documentation
5498 of SUBREG in rtl.texi. */
5499 for (i = 0; i < max_bitsize / 32; i++)
5500 tmp[i] = 0;
5501 for (i = 0; i < elem_bitsize; i += value_bit)
5502 {
5503 int ibase;
5504 if (WORDS_BIG_ENDIAN)
5505 ibase = elem_bitsize - 1 - i;
5506 else
5507 ibase = i;
5508 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5509 }
5510
5511 real_from_target (&r, tmp, outer_submode);
5512 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5513 }
5514 break;
5515
5516 case MODE_FRACT:
5517 case MODE_UFRACT:
5518 case MODE_ACCUM:
5519 case MODE_UACCUM:
5520 {
5521 FIXED_VALUE_TYPE f;
5522 f.data.low = 0;
5523 f.data.high = 0;
5524 f.mode = outer_submode;
5525
5526 for (i = 0;
5527 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5528 i += value_bit)
5529 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5530 for (; i < elem_bitsize; i += value_bit)
5531 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5532 << (i - HOST_BITS_PER_WIDE_INT));
5533
5534 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5535 }
5536 break;
5537
5538 default:
5539 gcc_unreachable ();
5540 }
5541 }
5542 if (VECTOR_MODE_P (outermode))
5543 return gen_rtx_CONST_VECTOR (outermode, result_v);
5544 else
5545 return result_s;
5546 }
5547
5548 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5549 Return 0 if no simplifications are possible. */
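/* As an informal example (little-endian 64-bit target assumed):
   simplify_subreg (SImode, gen_int_mode (0x12345678abcd, DImode), DImode, 0)
   is handled by simplify_immed_subreg and yields a CONST_INT holding the
   low 32 bits, 0x5678abcd.  Non-constant operands are handled by the
   SUBREG, REG, MEM and CONCAT cases below.  */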
5550 rtx
5551 simplify_subreg (machine_mode outermode, rtx op,
5552 machine_mode innermode, unsigned int byte)
5553 {
5554 /* Little bit of sanity checking. */
5555 gcc_assert (innermode != VOIDmode);
5556 gcc_assert (outermode != VOIDmode);
5557 gcc_assert (innermode != BLKmode);
5558 gcc_assert (outermode != BLKmode);
5559
5560 gcc_assert (GET_MODE (op) == innermode
5561 || GET_MODE (op) == VOIDmode);
5562
5563 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5564 return NULL_RTX;
5565
5566 if (byte >= GET_MODE_SIZE (innermode))
5567 return NULL_RTX;
5568
5569 if (outermode == innermode && !byte)
5570 return op;
5571
5572 if (CONST_SCALAR_INT_P (op)
5573 || CONST_DOUBLE_AS_FLOAT_P (op)
5574 || GET_CODE (op) == CONST_FIXED
5575 || GET_CODE (op) == CONST_VECTOR)
5576 return simplify_immed_subreg (outermode, op, innermode, byte);
5577
 5578 /* Changing mode twice with SUBREG => just change it once,
 5579 or not at all if changing back to the starting mode. */
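  /* E.g. (subreg:QI (subreg:SI (reg:DI R) 0) 0) can be rewritten as
     (subreg:QI (reg:DI R) 0), and (subreg:DI (subreg:SI (reg:DI R) 0) 0)
     collapses to (reg:DI R) itself; the zero offsets here assume
     little-endian lowpart numbering.  */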
5580 if (GET_CODE (op) == SUBREG)
5581 {
5582 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5583 int final_offset = byte + SUBREG_BYTE (op);
5584 rtx newx;
5585
5586 if (outermode == innermostmode
5587 && byte == 0 && SUBREG_BYTE (op) == 0)
5588 return SUBREG_REG (op);
5589
 5590 /* The SUBREG_BYTE represents the offset, as if the value were stored
 5591 in memory.  An irritating exception is the paradoxical subreg, where
 5592 we define SUBREG_BYTE to be 0; on big-endian machines the value
 5593 would otherwise be negative.  For a moment, undo this exception. */
5594 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5595 {
5596 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5597 if (WORDS_BIG_ENDIAN)
5598 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5599 if (BYTES_BIG_ENDIAN)
5600 final_offset += difference % UNITS_PER_WORD;
5601 }
5602 if (SUBREG_BYTE (op) == 0
5603 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5604 {
5605 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5606 if (WORDS_BIG_ENDIAN)
5607 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5608 if (BYTES_BIG_ENDIAN)
5609 final_offset += difference % UNITS_PER_WORD;
5610 }
5611
 5612 /* See whether the resulting subreg will be paradoxical. */
5613 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5614 {
5615 /* In nonparadoxical subregs we can't handle negative offsets. */
5616 if (final_offset < 0)
5617 return NULL_RTX;
 5618 /* Bail out in case the resulting subreg would be incorrect. */
5619 if (final_offset % GET_MODE_SIZE (outermode)
5620 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5621 return NULL_RTX;
5622 }
5623 else
5624 {
5625 int offset = 0;
5626 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5627
 5628 /* In a paradoxical subreg, see if we are still looking at the lower part.
 5629 If so, our SUBREG_BYTE will be 0. */
5630 if (WORDS_BIG_ENDIAN)
5631 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5632 if (BYTES_BIG_ENDIAN)
5633 offset += difference % UNITS_PER_WORD;
5634 if (offset == final_offset)
5635 final_offset = 0;
5636 else
5637 return NULL_RTX;
5638 }
5639
5640 /* Recurse for further possible simplifications. */
5641 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5642 final_offset);
5643 if (newx)
5644 return newx;
5645 if (validate_subreg (outermode, innermostmode,
5646 SUBREG_REG (op), final_offset))
5647 {
5648 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5649 if (SUBREG_PROMOTED_VAR_P (op)
5650 && SUBREG_PROMOTED_SIGN (op) >= 0
5651 && GET_MODE_CLASS (outermode) == MODE_INT
5652 && IN_RANGE (GET_MODE_SIZE (outermode),
5653 GET_MODE_SIZE (innermode),
5654 GET_MODE_SIZE (innermostmode))
5655 && subreg_lowpart_p (newx))
5656 {
5657 SUBREG_PROMOTED_VAR_P (newx) = 1;
5658 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5659 }
5660 return newx;
5661 }
5662 return NULL_RTX;
5663 }
5664
5665 /* SUBREG of a hard register => just change the register number
5666 and/or mode. If the hard register is not valid in that mode,
5667 suppress this simplification. If the hard register is the stack,
5668 frame, or argument pointer, leave this as a SUBREG. */
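/* For instance, on a 64-bit little-endian target where hard register 0 is
   valid in both DImode and SImode, (subreg:SI (reg:DI 0) 0) would normally
   become (reg:SI 0); simplify_subreg_regno decides whether the new
   register/mode combination is usable.  */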
5669
5670 if (REG_P (op) && HARD_REGISTER_P (op))
5671 {
5672 unsigned int regno, final_regno;
5673
5674 regno = REGNO (op);
5675 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5676 if (HARD_REGISTER_NUM_P (final_regno))
5677 {
5678 rtx x;
5679 int final_offset = byte;
5680
5681 /* Adjust offset for paradoxical subregs. */
5682 if (byte == 0
5683 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5684 {
5685 int difference = (GET_MODE_SIZE (innermode)
5686 - GET_MODE_SIZE (outermode));
5687 if (WORDS_BIG_ENDIAN)
5688 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5689 if (BYTES_BIG_ENDIAN)
5690 final_offset += difference % UNITS_PER_WORD;
5691 }
5692
5693 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5694
 5695 /* Propagate the original regno.  We don't have any way to specify
 5696 the offset inside the original regno, so do so only for the lowpart.
 5697 The information is used only by alias analysis, which cannot
 5698 grok partial registers anyway. */
5699
5700 if (subreg_lowpart_offset (outermode, innermode) == byte)
5701 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5702 return x;
5703 }
5704 }
5705
5706 /* If we have a SUBREG of a register that we are replacing and we are
5707 replacing it with a MEM, make a new MEM and try replacing the
5708 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5709 or if we would be widening it. */
5710
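  /* For example, (subreg:QI (mem:SI ADDR) 0) can usually be rewritten as
     (mem:QI ADDR); adjust_address_nv folds the byte offset into the
     address (offset 0 being the lowpart on a little-endian target).  */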
5711 if (MEM_P (op)
5712 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
 5713 /* Allow splitting of volatile memory references in case we don't
 5714 have an instruction to move the whole thing. */
5715 && (! MEM_VOLATILE_P (op)
5716 || ! have_insn_for (SET, innermode))
5717 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5718 return adjust_address_nv (op, outermode, byte);
5719
5720 /* Handle complex values represented as CONCAT
5721 of real and imaginary part. */
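  /* E.g. with 4-byte SFmode parts, (subreg:SF (concat:SC X Y) 0) selects
     the real part X and (subreg:SF (concat:SC X Y) 4) selects the
     imaginary part Y.  */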
5722 if (GET_CODE (op) == CONCAT)
5723 {
5724 unsigned int part_size, final_offset;
5725 rtx part, res;
5726
5727 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5728 if (byte < part_size)
5729 {
5730 part = XEXP (op, 0);
5731 final_offset = byte;
5732 }
5733 else
5734 {
5735 part = XEXP (op, 1);
5736 final_offset = byte - part_size;
5737 }
5738
5739 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5740 return NULL_RTX;
5741
5742 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5743 if (res)
5744 return res;
5745 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5746 return gen_rtx_SUBREG (outermode, part, final_offset);
5747 return NULL_RTX;
5748 }
5749
 5750 /* A SUBREG resulting from a zero extension may fold to zero if
 5751 it extracts higher bits than the ZERO_EXTEND's source provides. */
5752 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5753 {
5754 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5755 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5756 return CONST0_RTX (outermode);
5757 }
5758
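  /* Lowpart subregs of a wider value act like TRUNCATE.  For instance, the
     SImode lowpart subreg of (zero_extend:DI (reg:SI X)) (byte offset 0 on
     a little-endian target) is reduced by simplify_truncation to
     (reg:SI X), since truncating the extension back to its source mode is
     a no-op.  */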
5759 if (SCALAR_INT_MODE_P (outermode)
5760 && SCALAR_INT_MODE_P (innermode)
5761 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5762 && byte == subreg_lowpart_offset (outermode, innermode))
5763 {
5764 rtx tem = simplify_truncation (outermode, op, innermode);
5765 if (tem)
5766 return tem;
5767 }
5768
5769 return NULL_RTX;
5770 }
5771
5772 /* Make a SUBREG operation or equivalent if it folds. */
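/* For example, simplify_gen_subreg (SImode, X, DImode, 0) returns a folded
   rtx when simplify_subreg succeeds, a fresh (subreg:SI X 0) when such a
   subreg is merely valid, and NULL_RTX otherwise (e.g. when X is itself a
   SUBREG or a CONCAT that could not be simplified).  */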
5773
5774 rtx
5775 simplify_gen_subreg (machine_mode outermode, rtx op,
5776 machine_mode innermode, unsigned int byte)
5777 {
5778 rtx newx;
5779
5780 newx = simplify_subreg (outermode, op, innermode, byte);
5781 if (newx)
5782 return newx;
5783
5784 if (GET_CODE (op) == SUBREG
5785 || GET_CODE (op) == CONCAT
5786 || GET_MODE (op) == VOIDmode)
5787 return NULL_RTX;
5788
5789 if (validate_subreg (outermode, innermode, op, byte))
5790 return gen_rtx_SUBREG (outermode, op, byte);
5791
5792 return NULL_RTX;
5793 }
5794
5795 /* Simplify X, an rtx expression.
5796
5797 Return the simplified expression or NULL if no simplifications
5798 were possible.
5799
5800 This is the preferred entry point into the simplification routines;
5801 however, we still allow passes to call the more specific routines.
5802
5803 Right now GCC has three (yes, three) major bodies of RTL simplification
5804 code that need to be unified.
5805
5806 1. fold_rtx in cse.c. This code uses various CSE specific
5807 information to aid in RTL simplification.
5808
5809 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5810 it uses combine specific information to aid in RTL
5811 simplification.
5812
5813 3. The routines in this file.
5814
5815
5816 Long term we want to only have one body of simplification code; to
5817 get to that state I recommend the following steps:
5818
 5819 1. Pore over fold_rtx & simplify_rtx and move any simplifications
 5820 that do not depend on pass-specific state into these routines.
5821
5822 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5823 use this routine whenever possible.
5824
5825 3. Allow for pass dependent state to be provided to these
5826 routines and add simplifications based on the pass dependent
5827 state. Remove code from cse.c & combine.c that becomes
5828 redundant/dead.
5829
5830 It will take time, but ultimately the compiler will be easier to
 5831 maintain and improve.  It's totally silly that when we add a
 5832 simplification it needs to be added to 4 places (3 for RTL
 5833 simplification and 1 for tree simplification). */
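/* As a simple example of the dispatch below,
   simplify_rtx (gen_rtx_PLUS (SImode, const1_rtx, const1_rtx)) goes through
   simplify_binary_operation and yields (const_int 2); expressions with no
   known simplification come back as NULL.  */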
5834
5835 rtx
5836 simplify_rtx (const_rtx x)
5837 {
5838 const enum rtx_code code = GET_CODE (x);
5839 const machine_mode mode = GET_MODE (x);
5840
5841 switch (GET_RTX_CLASS (code))
5842 {
5843 case RTX_UNARY:
5844 return simplify_unary_operation (code, mode,
5845 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5846 case RTX_COMM_ARITH:
5847 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5848 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5849
5850 /* Fall through.... */
5851
5852 case RTX_BIN_ARITH:
5853 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5854
5855 case RTX_TERNARY:
5856 case RTX_BITFIELD_OPS:
5857 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5858 XEXP (x, 0), XEXP (x, 1),
5859 XEXP (x, 2));
5860
5861 case RTX_COMPARE:
5862 case RTX_COMM_COMPARE:
5863 return simplify_relational_operation (code, mode,
5864 ((GET_MODE (XEXP (x, 0))
5865 != VOIDmode)
5866 ? GET_MODE (XEXP (x, 0))
5867 : GET_MODE (XEXP (x, 1))),
5868 XEXP (x, 0),
5869 XEXP (x, 1));
5870
5871 case RTX_EXTRA:
5872 if (code == SUBREG)
5873 return simplify_subreg (mode, SUBREG_REG (x),
5874 GET_MODE (SUBREG_REG (x)),
5875 SUBREG_BYTE (x));
5876 break;
5877
5878 case RTX_OBJ:
5879 if (code == LO_SUM)
5880 {
5881 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5882 if (GET_CODE (XEXP (x, 0)) == HIGH
5883 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5884 return XEXP (x, 1);
5885 }
5886 break;
5887
5888 default:
5889 break;
5890 }
5891 return NULL;
5892 }