1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
47
48 /* Simplification and canonicalization of RTL. */
49
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
56
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
69 \f
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
74 {
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
76 }
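/* Illustrative example: in QImode, negating (const_int -128) yields
   (const_int -128) again, since gen_int_mode truncates the unsigned
   negation back to the mode's precision.  */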
77
78 /* Test whether expression, X, is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
80
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
83 {
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
86
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
89
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
93
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
99 {
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
111 }
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
116 {
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
119 }
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
124
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
128 }
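/* Illustrative example: for 32-bit SImode, mode_signbit_p holds only for
   the constant whose masked value is 0x80000000, i.e. the CONST_INT whose
   low 32 bits have just bit 31 set.  */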
129
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
133
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
136 {
137 unsigned int width;
138
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
141
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
145
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
148 }
149
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
154 {
155 unsigned int width;
156
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
159
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
163
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
166 }
167
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
172 {
173 unsigned int width;
174
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
177
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
181
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
184 }
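/* Illustrative example: in QImode these helpers look only at bit 7, so
   val_signbit_known_set_p holds for the values 0x80-0xff and
   val_signbit_known_clear_p for the values 0x00-0x7f.  */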
185 \f
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
188
189 rtx
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
192 {
193 rtx tem;
194
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
199
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
204
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 }
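/* Illustrative example: simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   first tries to fold; failing that, the commutative swap puts the constant
   last and the result is (plus:SI reg (const_int 1)).  */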
207 \f
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
210 rtx
211 avoid_constant_pool_reference (rtx x)
212 {
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
216
217 switch (GET_CODE (x))
218 {
219 case MEM:
220 break;
221
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
227 {
228 REAL_VALUE_TYPE d;
229
230 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
231 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
232 }
233 return x;
234
235 default:
236 return x;
237 }
238
239 if (GET_MODE (x) == BLKmode)
240 return x;
241
242 addr = XEXP (x, 0);
243
244 /* Call target hook to avoid the effects of -fpic etc.... */
245 addr = targetm.delegitimize_address (addr);
246
247 /* Split the address into a base and integer offset. */
248 if (GET_CODE (addr) == CONST
249 && GET_CODE (XEXP (addr, 0)) == PLUS
250 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
251 {
252 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
253 addr = XEXP (XEXP (addr, 0), 0);
254 }
255
256 if (GET_CODE (addr) == LO_SUM)
257 addr = XEXP (addr, 1);
258
259 /* If this is a constant pool reference, we can turn it into its
260 constant and hope that simplifications happen. */
261 if (GET_CODE (addr) == SYMBOL_REF
262 && CONSTANT_POOL_ADDRESS_P (addr))
263 {
264 c = get_pool_constant (addr);
265 cmode = get_pool_mode (addr);
266
267 /* If we're accessing the constant in a different mode than it was
268 originally stored, attempt to fix that up via subreg simplifications.
269 If that fails we have no choice but to return the original memory. */
270 if ((offset != 0 || cmode != GET_MODE (x))
271 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
272 {
273 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
274 if (tem && CONSTANT_P (tem))
275 return tem;
276 }
277 else
278 return c;
279 }
280
281 return x;
282 }
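/* Illustrative example: for a (mem:SF (symbol_ref ...)) whose address
   satisfies CONSTANT_POOL_ADDRESS_P and whose pool entry is the SFmode
   constant 1.0, this returns that (const_double:SF 1.0) itself so that
   later folding can see the value.  */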
283 \f
284 /* Simplify a MEM based on its attributes. This is the default
285 delegitimize_address target hook, and it's recommended that every
286 overrider call it. */
287
288 rtx
289 delegitimize_mem_from_attrs (rtx x)
290 {
291 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
292 use their base addresses as equivalent. */
293 if (MEM_P (x)
294 && MEM_EXPR (x)
295 && MEM_OFFSET_KNOWN_P (x))
296 {
297 tree decl = MEM_EXPR (x);
298 machine_mode mode = GET_MODE (x);
299 HOST_WIDE_INT offset = 0;
300
301 switch (TREE_CODE (decl))
302 {
303 default:
304 decl = NULL;
305 break;
306
307 case VAR_DECL:
308 break;
309
310 case ARRAY_REF:
311 case ARRAY_RANGE_REF:
312 case COMPONENT_REF:
313 case BIT_FIELD_REF:
314 case REALPART_EXPR:
315 case IMAGPART_EXPR:
316 case VIEW_CONVERT_EXPR:
317 {
318 HOST_WIDE_INT bitsize, bitpos;
319 tree toffset;
320 int unsignedp, volatilep = 0;
321
322 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
323 &mode, &unsignedp, &volatilep, false);
324 if (bitsize != GET_MODE_BITSIZE (mode)
325 || (bitpos % BITS_PER_UNIT)
326 || (toffset && !tree_fits_shwi_p (toffset)))
327 decl = NULL;
328 else
329 {
330 offset += bitpos / BITS_PER_UNIT;
331 if (toffset)
332 offset += tree_to_shwi (toffset);
333 }
334 break;
335 }
336 }
337
338 if (decl
339 && mode == GET_MODE (x)
340 && TREE_CODE (decl) == VAR_DECL
341 && (TREE_STATIC (decl)
342 || DECL_THREAD_LOCAL_P (decl))
343 && DECL_RTL_SET_P (decl)
344 && MEM_P (DECL_RTL (decl)))
345 {
346 rtx newx;
347
348 offset += MEM_OFFSET (x);
349
350 newx = DECL_RTL (decl);
351
352 if (MEM_P (newx))
353 {
354 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
355
356 /* Avoid creating a new MEM needlessly if we already had
357 the same address. We do if there's no OFFSET and the
358 old address X is identical to NEWX, or if X is of the
359 form (plus NEWX OFFSET), or the NEWX is of the form
360 (plus Y (const_int Z)) and X is that with the offset
361 added: (plus Y (const_int Z+OFFSET)). */
362 if (!((offset == 0
363 || (GET_CODE (o) == PLUS
364 && GET_CODE (XEXP (o, 1)) == CONST_INT
365 && (offset == INTVAL (XEXP (o, 1))
366 || (GET_CODE (n) == PLUS
367 && GET_CODE (XEXP (n, 1)) == CONST_INT
368 && (INTVAL (XEXP (n, 1)) + offset
369 == INTVAL (XEXP (o, 1)))
370 && (n = XEXP (n, 0))))
371 && (o = XEXP (o, 0))))
372 && rtx_equal_p (o, n)))
373 x = adjust_address_nv (newx, mode, offset);
374 }
375 else if (GET_MODE (x) == GET_MODE (newx)
376 && offset == 0)
377 x = newx;
378 }
379 }
380
381 return x;
382 }
383 \f
384 /* Make a unary operation by first seeing if it folds and otherwise making
385 the specified operation. */
386
387 rtx
388 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
389 machine_mode op_mode)
390 {
391 rtx tem;
392
393 /* If this simplifies, use it. */
394 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
395 return tem;
396
397 return gen_rtx_fmt_e (code, mode, op);
398 }
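/* Illustrative example: simplify_gen_unary (NOT, SImode, const0_rtx, SImode)
   folds to (const_int -1); a non-constant operand that matches no rule
   simply yields a fresh (not:SI op).  */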
399
400 /* Likewise for ternary operations. */
401
402 rtx
403 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
404 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
405 {
406 rtx tem;
407
408 /* If this simplifies, use it. */
409 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
410 op0, op1, op2)))
411 return tem;
412
413 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
414 }
415
416 /* Likewise, for relational operations.
 417    CMP_MODE specifies the mode in which the comparison is done.  */
418
419 rtx
420 simplify_gen_relational (enum rtx_code code, machine_mode mode,
421 machine_mode cmp_mode, rtx op0, rtx op1)
422 {
423 rtx tem;
424
425 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
426 op0, op1)))
427 return tem;
428
429 return gen_rtx_fmt_ee (code, mode, op0, op1);
430 }
431 \f
432 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
433 and simplify the result. If FN is non-NULL, call this callback on each
 434    X; if it returns non-NULL, replace X with its return value and simplify the
435 result. */
436
437 rtx
438 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
439 rtx (*fn) (rtx, const_rtx, void *), void *data)
440 {
441 enum rtx_code code = GET_CODE (x);
442 machine_mode mode = GET_MODE (x);
443 machine_mode op_mode;
444 const char *fmt;
445 rtx op0, op1, op2, newx, op;
446 rtvec vec, newvec;
447 int i, j;
448
449 if (__builtin_expect (fn != NULL, 0))
450 {
451 newx = fn (x, old_rtx, data);
452 if (newx)
453 return newx;
454 }
455 else if (rtx_equal_p (x, old_rtx))
456 return copy_rtx ((rtx) data);
457
458 switch (GET_RTX_CLASS (code))
459 {
460 case RTX_UNARY:
461 op0 = XEXP (x, 0);
462 op_mode = GET_MODE (op0);
463 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
464 if (op0 == XEXP (x, 0))
465 return x;
466 return simplify_gen_unary (code, mode, op0, op_mode);
467
468 case RTX_BIN_ARITH:
469 case RTX_COMM_ARITH:
470 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
473 return x;
474 return simplify_gen_binary (code, mode, op0, op1);
475
476 case RTX_COMPARE:
477 case RTX_COMM_COMPARE:
478 op0 = XEXP (x, 0);
479 op1 = XEXP (x, 1);
480 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
483 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
484 return x;
485 return simplify_gen_relational (code, mode, op_mode, op0, op1);
486
487 case RTX_TERNARY:
488 case RTX_BITFIELD_OPS:
489 op0 = XEXP (x, 0);
490 op_mode = GET_MODE (op0);
491 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
494 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
495 return x;
496 if (op_mode == VOIDmode)
497 op_mode = GET_MODE (op0);
498 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
499
500 case RTX_EXTRA:
501 if (code == SUBREG)
502 {
503 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
504 if (op0 == SUBREG_REG (x))
505 return x;
506 op0 = simplify_gen_subreg (GET_MODE (x), op0,
507 GET_MODE (SUBREG_REG (x)),
508 SUBREG_BYTE (x));
509 return op0 ? op0 : x;
510 }
511 break;
512
513 case RTX_OBJ:
514 if (code == MEM)
515 {
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 if (op0 == XEXP (x, 0))
518 return x;
519 return replace_equiv_address_nv (x, op0);
520 }
521 else if (code == LO_SUM)
522 {
523 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
524 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
525
526 /* (lo_sum (high x) y) -> y where x and y have the same base. */
527 if (GET_CODE (op0) == HIGH)
528 {
529 rtx base0, base1, offset0, offset1;
530 split_const (XEXP (op0, 0), &base0, &offset0);
531 split_const (op1, &base1, &offset1);
532 if (rtx_equal_p (base0, base1))
533 return op1;
534 }
535
536 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
537 return x;
538 return gen_rtx_LO_SUM (mode, op0, op1);
539 }
540 break;
541
542 default:
543 break;
544 }
545
546 newx = x;
547 fmt = GET_RTX_FORMAT (code);
548 for (i = 0; fmt[i]; i++)
549 switch (fmt[i])
550 {
551 case 'E':
552 vec = XVEC (x, i);
553 newvec = XVEC (newx, i);
554 for (j = 0; j < GET_NUM_ELEM (vec); j++)
555 {
556 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
557 old_rtx, fn, data);
558 if (op != RTVEC_ELT (vec, j))
559 {
560 if (newvec == vec)
561 {
562 newvec = shallow_copy_rtvec (vec);
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XVEC (newx, i) = newvec;
566 }
567 RTVEC_ELT (newvec, j) = op;
568 }
569 }
570 break;
571
572 case 'e':
573 if (XEXP (x, i))
574 {
575 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
576 if (op != XEXP (x, i))
577 {
578 if (x == newx)
579 newx = shallow_copy_rtx (x);
580 XEXP (newx, i) = op;
581 }
582 }
583 break;
584 }
585 return newx;
586 }
587
588 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
589 resulting RTX. Return a new RTX which is as simplified as possible. */
590
591 rtx
592 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
593 {
594 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
595 }
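/* Illustrative example (hand-checked): replacing (reg:SI 101) with
   (const_int 0) in (plus:SI (reg:SI 100) (reg:SI 101)) rebuilds the PLUS,
   which then folds away, so the result is just (reg:SI 100).  */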
596 \f
597 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
598 Only handle cases where the truncated value is inherently an rvalue.
599
600 RTL provides two ways of truncating a value:
601
602 1. a lowpart subreg. This form is only a truncation when both
603 the outer and inner modes (here MODE and OP_MODE respectively)
604 are scalar integers, and only then when the subreg is used as
605 an rvalue.
606
607 It is only valid to form such truncating subregs if the
608 truncation requires no action by the target. The onus for
609 proving this is on the creator of the subreg -- e.g. the
610 caller to simplify_subreg or simplify_gen_subreg -- and typically
611 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
612
613 2. a TRUNCATE. This form handles both scalar and compound integers.
614
615 The first form is preferred where valid. However, the TRUNCATE
616 handling in simplify_unary_operation turns the second form into the
617 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
618 so it is generally safe to form rvalue truncations using:
619
620 simplify_gen_unary (TRUNCATE, ...)
621
622 and leave simplify_unary_operation to work out which representation
623 should be used.
624
625 Because of the proof requirements on (1), simplify_truncation must
626 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
627 regardless of whether the outer truncation came from a SUBREG or a
628 TRUNCATE. For example, if the caller has proven that an SImode
629 truncation of:
630
631 (and:DI X Y)
632
633 is a no-op and can be represented as a subreg, it does not follow
634 that SImode truncations of X and Y are also no-ops. On a target
635 like 64-bit MIPS that requires SImode values to be stored in
636 sign-extended form, an SImode truncation of:
637
638 (and:DI (reg:DI X) (const_int 63))
639
640 is trivially a no-op because only the lower 6 bits can be set.
641 However, X is still an arbitrary 64-bit number and so we cannot
642 assume that truncating it too is a no-op. */
643
644 static rtx
645 simplify_truncation (machine_mode mode, rtx op,
646 machine_mode op_mode)
647 {
648 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
649 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
650 gcc_assert (precision <= op_precision);
651
652 /* Optimize truncations of zero and sign extended values. */
653 if (GET_CODE (op) == ZERO_EXTEND
654 || GET_CODE (op) == SIGN_EXTEND)
655 {
656 /* There are three possibilities. If MODE is the same as the
657 origmode, we can omit both the extension and the subreg.
658 If MODE is not larger than the origmode, we can apply the
659 truncation without the extension. Finally, if the outermode
660 is larger than the origmode, we can just extend to the appropriate
661 mode. */
662 machine_mode origmode = GET_MODE (XEXP (op, 0));
663 if (mode == origmode)
664 return XEXP (op, 0);
665 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
666 return simplify_gen_unary (TRUNCATE, mode,
667 XEXP (op, 0), origmode);
668 else
669 return simplify_gen_unary (GET_CODE (op), mode,
670 XEXP (op, 0), origmode);
671 }
672
673 /* If the machine can perform operations in the truncated mode, distribute
674 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
675 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
676 if (1
677 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
678 && (GET_CODE (op) == PLUS
679 || GET_CODE (op) == MINUS
680 || GET_CODE (op) == MULT))
681 {
682 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
683 if (op0)
684 {
685 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
686 if (op1)
687 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
688 }
689 }
690
691 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 692    (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 /* Ensure that OP_MODE is at least twice as wide as MODE
697 to avoid the possibility that an outer LSHIFTRT shifts by more
698 than the sign extension's sign_bit_copies and introduces zeros
699 into the high bits of the result. */
700 && 2 * precision <= op_precision
701 && CONST_INT_P (XEXP (op, 1))
702 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
704 && UINTVAL (XEXP (op, 1)) < precision)
705 return simplify_gen_binary (ASHIFTRT, mode,
706 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
707
708 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 709    (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
710 the outer subreg is effectively a truncation to the original mode. */
711 if ((GET_CODE (op) == LSHIFTRT
712 || GET_CODE (op) == ASHIFTRT)
713 && CONST_INT_P (XEXP (op, 1))
714 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
716 && UINTVAL (XEXP (op, 1)) < precision)
717 return simplify_gen_binary (LSHIFTRT, mode,
718 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
719
720 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 721    (ashift:QI (x:QI) C), where C is a suitable small constant and
722 the outer subreg is effectively a truncation to the original mode. */
723 if (GET_CODE (op) == ASHIFT
724 && CONST_INT_P (XEXP (op, 1))
725 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
726 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
727 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
728 && UINTVAL (XEXP (op, 1)) < precision)
729 return simplify_gen_binary (ASHIFT, mode,
730 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
731
732 /* Recognize a word extraction from a multi-word subreg. */
733 if ((GET_CODE (op) == LSHIFTRT
734 || GET_CODE (op) == ASHIFTRT)
735 && SCALAR_INT_MODE_P (mode)
736 && SCALAR_INT_MODE_P (op_mode)
737 && precision >= BITS_PER_WORD
738 && 2 * precision <= op_precision
739 && CONST_INT_P (XEXP (op, 1))
740 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
741 && UINTVAL (XEXP (op, 1)) < op_precision)
742 {
743 int byte = subreg_lowpart_offset (mode, op_mode);
744 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
745 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
746 (WORDS_BIG_ENDIAN
747 ? byte - shifted_bytes
748 : byte + shifted_bytes));
749 }
750
751 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
752 and try replacing the TRUNCATE and shift with it. Don't do this
753 if the MEM has a mode-dependent address. */
754 if ((GET_CODE (op) == LSHIFTRT
755 || GET_CODE (op) == ASHIFTRT)
756 && SCALAR_INT_MODE_P (op_mode)
757 && MEM_P (XEXP (op, 0))
758 && CONST_INT_P (XEXP (op, 1))
759 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
760 && INTVAL (XEXP (op, 1)) > 0
761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
762 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
763 MEM_ADDR_SPACE (XEXP (op, 0)))
764 && ! MEM_VOLATILE_P (XEXP (op, 0))
765 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
766 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
767 {
768 int byte = subreg_lowpart_offset (mode, op_mode);
769 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
770 return adjust_address_nv (XEXP (op, 0), mode,
771 (WORDS_BIG_ENDIAN
772 ? byte - shifted_bytes
773 : byte + shifted_bytes));
774 }
775
776 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
777 (OP:SI foo:SI) if OP is NEG or ABS. */
778 if ((GET_CODE (op) == ABS
779 || GET_CODE (op) == NEG)
780 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
781 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
782 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
783 return simplify_gen_unary (GET_CODE (op), mode,
784 XEXP (XEXP (op, 0), 0), mode);
785
786 /* (truncate:A (subreg:B (truncate:C X) 0)) is
787 (truncate:A X). */
788 if (GET_CODE (op) == SUBREG
789 && SCALAR_INT_MODE_P (mode)
790 && SCALAR_INT_MODE_P (op_mode)
791 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
794 {
795 rtx inner = XEXP (SUBREG_REG (op), 0);
796 if (GET_MODE_PRECISION (mode)
797 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
798 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
799 else
800 /* If subreg above is paradoxical and C is narrower
801 than A, return (subreg:A (truncate:C X) 0). */
802 return simplify_gen_subreg (mode, SUBREG_REG (op),
803 GET_MODE (SUBREG_REG (op)), 0);
804 }
805
806 /* (truncate:A (truncate:B X)) is (truncate:A X). */
807 if (GET_CODE (op) == TRUNCATE)
808 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
809 GET_MODE (XEXP (op, 0)));
810
811 return NULL_RTX;
812 }
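/* Illustrative example: simplify_truncation (QImode,
   (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)), SImode)
   returns (ashiftrt:QI (reg:QI x) (const_int 2)), matching the
   sign-extension case handled above.  */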
813 \f
814 /* Try to simplify a unary operation CODE whose output mode is to be
815 MODE with input operand OP whose mode was originally OP_MODE.
816 Return zero if no simplification can be made. */
817 rtx
818 simplify_unary_operation (enum rtx_code code, machine_mode mode,
819 rtx op, machine_mode op_mode)
820 {
821 rtx trueop, tem;
822
823 trueop = avoid_constant_pool_reference (op);
824
825 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
826 if (tem)
827 return tem;
828
829 return simplify_unary_operation_1 (code, mode, op);
830 }
831
832 /* Perform some simplifications we can do even if the operands
833 aren't constant. */
834 static rtx
835 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
836 {
837 enum rtx_code reversed;
838 rtx temp;
839
840 switch (code)
841 {
842 case NOT:
843 /* (not (not X)) == X. */
844 if (GET_CODE (op) == NOT)
845 return XEXP (op, 0);
846
847 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
848 comparison is all ones. */
849 if (COMPARISON_P (op)
850 && (mode == BImode || STORE_FLAG_VALUE == -1)
851 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
852 return simplify_gen_relational (reversed, mode, VOIDmode,
853 XEXP (op, 0), XEXP (op, 1));
854
855 /* (not (plus X -1)) can become (neg X). */
856 if (GET_CODE (op) == PLUS
857 && XEXP (op, 1) == constm1_rtx)
858 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
859
860 /* Similarly, (not (neg X)) is (plus X -1). */
861 if (GET_CODE (op) == NEG)
862 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
863 CONSTM1_RTX (mode));
864
865 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
866 if (GET_CODE (op) == XOR
867 && CONST_INT_P (XEXP (op, 1))
868 && (temp = simplify_unary_operation (NOT, mode,
869 XEXP (op, 1), mode)) != 0)
870 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
871
872 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
873 if (GET_CODE (op) == PLUS
874 && CONST_INT_P (XEXP (op, 1))
875 && mode_signbit_p (mode, XEXP (op, 1))
876 && (temp = simplify_unary_operation (NOT, mode,
877 XEXP (op, 1), mode)) != 0)
878 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
879
880
881 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
882 operands other than 1, but that is not valid. We could do a
883 similar simplification for (not (lshiftrt C X)) where C is
884 just the sign bit, but this doesn't seem common enough to
885 bother with. */
886 if (GET_CODE (op) == ASHIFT
887 && XEXP (op, 0) == const1_rtx)
888 {
889 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
890 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
891 }
892
893 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
894 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
895 so we can perform the above simplification. */
896 if (STORE_FLAG_VALUE == -1
897 && GET_CODE (op) == ASHIFTRT
898 && CONST_INT_P (XEXP (op, 1))
899 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
900 return simplify_gen_relational (GE, mode, VOIDmode,
901 XEXP (op, 0), const0_rtx);
902
903
904 if (GET_CODE (op) == SUBREG
905 && subreg_lowpart_p (op)
906 && (GET_MODE_SIZE (GET_MODE (op))
907 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
908 && GET_CODE (SUBREG_REG (op)) == ASHIFT
909 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
910 {
911 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
912 rtx x;
913
914 x = gen_rtx_ROTATE (inner_mode,
915 simplify_gen_unary (NOT, inner_mode, const1_rtx,
916 inner_mode),
917 XEXP (SUBREG_REG (op), 1));
918 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
919 if (temp)
920 return temp;
921 }
922
923 /* Apply De Morgan's laws to reduce number of patterns for machines
924 with negating logical insns (and-not, nand, etc.). If result has
925 only one NOT, put it first, since that is how the patterns are
926 coded. */
927 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
928 {
929 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
930 machine_mode op_mode;
931
932 op_mode = GET_MODE (in1);
933 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
934
935 op_mode = GET_MODE (in2);
936 if (op_mode == VOIDmode)
937 op_mode = mode;
938 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
939
940 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
941 std::swap (in1, in2);
942
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
945 }
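      /* Illustrative example: (not:SI (ior:SI X Y)) becomes
	 (and:SI (not:SI X) (not:SI Y)) here, and if only one operand
	 remains a NOT it is placed first to match the insn patterns.  */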
946
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
949 {
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
952 }
953 break;
954
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
959
960 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
 961 	 If the comparison is not reversible, use
 962 	 x ? y : (neg y) instead.  */
963 if (GET_CODE (op) == IF_THEN_ELSE)
964 {
965 rtx cond = XEXP (op, 0);
966 rtx true_rtx = XEXP (op, 1);
967 rtx false_rtx = XEXP (op, 2);
968
969 if ((GET_CODE (true_rtx) == NEG
970 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
971 || (GET_CODE (false_rtx) == NEG
972 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
973 {
974 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
975 temp = reversed_comparison (cond, mode);
976 else
977 {
978 temp = cond;
979 std::swap (true_rtx, false_rtx);
980 }
981 return simplify_gen_ternary (IF_THEN_ELSE, mode,
982 mode, temp, true_rtx, false_rtx);
983 }
984 }
985
986 /* (neg (plus X 1)) can become (not X). */
987 if (GET_CODE (op) == PLUS
988 && XEXP (op, 1) == const1_rtx)
989 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990
991 /* Similarly, (neg (not X)) is (plus X 1). */
992 if (GET_CODE (op) == NOT)
993 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
994 CONST1_RTX (mode));
995
996 /* (neg (minus X Y)) can become (minus Y X). This transformation
997 isn't safe for modes with signed zeros, since if X and Y are
998 both +0, (minus Y X) is the same as (minus X Y). If the
999 rounding mode is towards +infinity (or -infinity) then the two
1000 expressions will be rounded differently. */
1001 if (GET_CODE (op) == MINUS
1002 && !HONOR_SIGNED_ZEROS (mode)
1003 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1004 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1005
1006 if (GET_CODE (op) == PLUS
1007 && !HONOR_SIGNED_ZEROS (mode)
1008 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1009 {
1010 /* (neg (plus A C)) is simplified to (minus -C A). */
1011 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1012 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1013 {
1014 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1015 if (temp)
1016 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1017 }
1018
1019 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1020 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1021 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1022 }
1023
1024 /* (neg (mult A B)) becomes (mult A (neg B)).
1025 This works even for floating-point values. */
1026 if (GET_CODE (op) == MULT
1027 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1028 {
1029 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1030 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1031 }
1032
1033 /* NEG commutes with ASHIFT since it is multiplication. Only do
1034 this if we can then eliminate the NEG (e.g., if the operand
1035 is a constant). */
1036 if (GET_CODE (op) == ASHIFT)
1037 {
1038 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1039 if (temp)
1040 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1041 }
1042
1043 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1044 C is equal to the width of MODE minus 1. */
1045 if (GET_CODE (op) == ASHIFTRT
1046 && CONST_INT_P (XEXP (op, 1))
1047 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1048 return simplify_gen_binary (LSHIFTRT, mode,
1049 XEXP (op, 0), XEXP (op, 1));
1050
1051 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1052 C is equal to the width of MODE minus 1. */
1053 if (GET_CODE (op) == LSHIFTRT
1054 && CONST_INT_P (XEXP (op, 1))
1055 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1056 return simplify_gen_binary (ASHIFTRT, mode,
1057 XEXP (op, 0), XEXP (op, 1));
1058
1059 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1060 if (GET_CODE (op) == XOR
1061 && XEXP (op, 1) == const1_rtx
1062 && nonzero_bits (XEXP (op, 0), mode) == 1)
1063 return plus_constant (mode, XEXP (op, 0), -1);
1064
1065 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1066 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1067 if (GET_CODE (op) == LT
1068 && XEXP (op, 1) == const0_rtx
1069 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1070 {
1071 machine_mode inner = GET_MODE (XEXP (op, 0));
1072 int isize = GET_MODE_PRECISION (inner);
1073 if (STORE_FLAG_VALUE == 1)
1074 {
1075 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1076 GEN_INT (isize - 1));
1077 if (mode == inner)
1078 return temp;
1079 if (GET_MODE_PRECISION (mode) > isize)
1080 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1081 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1082 }
1083 else if (STORE_FLAG_VALUE == -1)
1084 {
1085 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1086 GEN_INT (isize - 1));
1087 if (mode == inner)
1088 return temp;
1089 if (GET_MODE_PRECISION (mode) > isize)
1090 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1091 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1092 }
1093 }
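      /* Illustrative example: with STORE_FLAG_VALUE == 1,
	 (neg:SI (lt:SI (reg:SI x) (const_int 0))) becomes
	 (ashiftrt:SI (reg:SI x) (const_int 31)), i.e. 0 or -1
	 according to the sign of x.  */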
1094 break;
1095
1096 case TRUNCATE:
1097 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1098 with the umulXi3_highpart patterns. */
1099 if (GET_CODE (op) == LSHIFTRT
1100 && GET_CODE (XEXP (op, 0)) == MULT)
1101 break;
1102
1103 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1104 {
1105 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1106 {
1107 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1108 if (temp)
1109 return temp;
1110 }
1111 /* We can't handle truncation to a partial integer mode here
1112 because we don't know the real bitsize of the partial
1113 integer mode. */
1114 break;
1115 }
1116
1117 if (GET_MODE (op) != VOIDmode)
1118 {
1119 temp = simplify_truncation (mode, op, GET_MODE (op));
1120 if (temp)
1121 return temp;
1122 }
1123
1124 /* If we know that the value is already truncated, we can
1125 replace the TRUNCATE with a SUBREG. */
1126 if (GET_MODE_NUNITS (mode) == 1
1127 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1128 || truncated_to_mode (mode, op)))
1129 {
1130 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1131 if (temp)
1132 return temp;
1133 }
1134
1135 /* A truncate of a comparison can be replaced with a subreg if
1136 STORE_FLAG_VALUE permits. This is like the previous test,
1137 but it works even if the comparison is done in a mode larger
1138 than HOST_BITS_PER_WIDE_INT. */
1139 if (HWI_COMPUTABLE_MODE_P (mode)
1140 && COMPARISON_P (op)
1141 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1142 {
1143 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1144 if (temp)
1145 return temp;
1146 }
1147
1148 /* A truncate of a memory is just loading the low part of the memory
1149 if we are not changing the meaning of the address. */
1150 if (GET_CODE (op) == MEM
1151 && !VECTOR_MODE_P (mode)
1152 && !MEM_VOLATILE_P (op)
1153 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1154 {
1155 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1156 if (temp)
1157 return temp;
1158 }
1159
1160 break;
1161
1162 case FLOAT_TRUNCATE:
1163 if (DECIMAL_FLOAT_MODE_P (mode))
1164 break;
1165
1166 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1167 if (GET_CODE (op) == FLOAT_EXTEND
1168 && GET_MODE (XEXP (op, 0)) == mode)
1169 return XEXP (op, 0);
1170
1171 /* (float_truncate:SF (float_truncate:DF foo:XF))
1172 = (float_truncate:SF foo:XF).
1173 This may eliminate double rounding, so it is unsafe.
1174
1175 (float_truncate:SF (float_extend:XF foo:DF))
1176 = (float_truncate:SF foo:DF).
1177
1178 (float_truncate:DF (float_extend:XF foo:SF))
1179 = (float_extend:DF foo:SF). */
1180 if ((GET_CODE (op) == FLOAT_TRUNCATE
1181 && flag_unsafe_math_optimizations)
1182 || GET_CODE (op) == FLOAT_EXTEND)
1183 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1184 0)))
1185 > GET_MODE_SIZE (mode)
1186 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1187 mode,
1188 XEXP (op, 0), mode);
1189
1190 /* (float_truncate (float x)) is (float x) */
1191 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1192 && (flag_unsafe_math_optimizations
1193 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1194 && ((unsigned)significand_size (GET_MODE (op))
1195 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1196 - num_sign_bit_copies (XEXP (op, 0),
1197 GET_MODE (XEXP (op, 0))))))))
1198 return simplify_gen_unary (GET_CODE (op), mode,
1199 XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)));
1201
1202 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1203 (OP:SF foo:SF) if OP is NEG or ABS. */
1204 if ((GET_CODE (op) == ABS
1205 || GET_CODE (op) == NEG)
1206 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1208 return simplify_gen_unary (GET_CODE (op), mode,
1209 XEXP (XEXP (op, 0), 0), mode);
1210
1211 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1212 is (float_truncate:SF x). */
1213 if (GET_CODE (op) == SUBREG
1214 && subreg_lowpart_p (op)
1215 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1216 return SUBREG_REG (op);
1217 break;
1218
1219 case FLOAT_EXTEND:
1220 if (DECIMAL_FLOAT_MODE_P (mode))
1221 break;
1222
1223 /* (float_extend (float_extend x)) is (float_extend x)
1224
1225 (float_extend (float x)) is (float x) assuming that double
1226 rounding can't happen.
1227 */
1228 if (GET_CODE (op) == FLOAT_EXTEND
1229 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1230 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1231 && ((unsigned)significand_size (GET_MODE (op))
1232 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1233 - num_sign_bit_copies (XEXP (op, 0),
1234 GET_MODE (XEXP (op, 0)))))))
1235 return simplify_gen_unary (GET_CODE (op), mode,
1236 XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1238
1239 break;
1240
1241 case ABS:
1242 /* (abs (neg <foo>)) -> (abs <foo>) */
1243 if (GET_CODE (op) == NEG)
1244 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1246
1247 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1248 do nothing. */
1249 if (GET_MODE (op) == VOIDmode)
1250 break;
1251
1252 /* If operand is something known to be positive, ignore the ABS. */
1253 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1254 || val_signbit_known_clear_p (GET_MODE (op),
1255 nonzero_bits (op, GET_MODE (op))))
1256 return op;
1257
1258 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1259 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1260 return gen_rtx_NEG (mode, op);
1261
1262 break;
1263
1264 case FFS:
1265 /* (ffs (*_extend <X>)) = (ffs <X>) */
1266 if (GET_CODE (op) == SIGN_EXTEND
1267 || GET_CODE (op) == ZERO_EXTEND)
1268 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1269 GET_MODE (XEXP (op, 0)));
1270 break;
1271
1272 case POPCOUNT:
1273 switch (GET_CODE (op))
1274 {
1275 case BSWAP:
1276 case ZERO_EXTEND:
1277 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1278 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1279 GET_MODE (XEXP (op, 0)));
1280
1281 case ROTATE:
1282 case ROTATERT:
1283 /* Rotations don't affect popcount. */
1284 if (!side_effects_p (XEXP (op, 1)))
1285 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1286 GET_MODE (XEXP (op, 0)));
1287 break;
1288
1289 default:
1290 break;
1291 }
1292 break;
1293
1294 case PARITY:
1295 switch (GET_CODE (op))
1296 {
1297 case NOT:
1298 case BSWAP:
1299 case ZERO_EXTEND:
1300 case SIGN_EXTEND:
1301 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1302 GET_MODE (XEXP (op, 0)));
1303
1304 case ROTATE:
1305 case ROTATERT:
1306 /* Rotations don't affect parity. */
1307 if (!side_effects_p (XEXP (op, 1)))
1308 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1309 GET_MODE (XEXP (op, 0)));
1310 break;
1311
1312 default:
1313 break;
1314 }
1315 break;
1316
1317 case BSWAP:
1318 /* (bswap (bswap x)) -> x. */
1319 if (GET_CODE (op) == BSWAP)
1320 return XEXP (op, 0);
1321 break;
1322
1323 case FLOAT:
1324 /* (float (sign_extend <X>)) = (float <X>). */
1325 if (GET_CODE (op) == SIGN_EXTEND)
1326 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1327 GET_MODE (XEXP (op, 0)));
1328 break;
1329
1330 case SIGN_EXTEND:
1331 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1332 becomes just the MINUS if its mode is MODE. This allows
1333 folding switch statements on machines using casesi (such as
1334 the VAX). */
1335 if (GET_CODE (op) == TRUNCATE
1336 && GET_MODE (XEXP (op, 0)) == mode
1337 && GET_CODE (XEXP (op, 0)) == MINUS
1338 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1339 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1340 return XEXP (op, 0);
1341
1342 /* Extending a widening multiplication should be canonicalized to
1343 a wider widening multiplication. */
1344 if (GET_CODE (op) == MULT)
1345 {
1346 rtx lhs = XEXP (op, 0);
1347 rtx rhs = XEXP (op, 1);
1348 enum rtx_code lcode = GET_CODE (lhs);
1349 enum rtx_code rcode = GET_CODE (rhs);
1350
1351 /* Widening multiplies usually extend both operands, but sometimes
1352 they use a shift to extract a portion of a register. */
1353 if ((lcode == SIGN_EXTEND
1354 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1355 && (rcode == SIGN_EXTEND
1356 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1357 {
1358 machine_mode lmode = GET_MODE (lhs);
1359 machine_mode rmode = GET_MODE (rhs);
1360 int bits;
1361
1362 if (lcode == ASHIFTRT)
1363 /* Number of bits not shifted off the end. */
1364 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1365 else /* lcode == SIGN_EXTEND */
1366 /* Size of inner mode. */
1367 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1368
1369 if (rcode == ASHIFTRT)
1370 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1371 else /* rcode == SIGN_EXTEND */
1372 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1373
 1374 	     /* We can only widen multiplies if the result is mathematically
1375 equivalent. I.e. if overflow was impossible. */
1376 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1377 return simplify_gen_binary
1378 (MULT, mode,
1379 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1380 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1381 }
1382 }
1383
1384 /* Check for a sign extension of a subreg of a promoted
1385 variable, where the promotion is sign-extended, and the
1386 target mode is the same as the variable's promotion. */
1387 if (GET_CODE (op) == SUBREG
1388 && SUBREG_PROMOTED_VAR_P (op)
1389 && SUBREG_PROMOTED_SIGNED_P (op)
1390 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1391 {
1392 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1393 if (temp)
1394 return temp;
1395 }
1396
1397 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1398 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1399 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1400 {
1401 gcc_assert (GET_MODE_PRECISION (mode)
1402 > GET_MODE_PRECISION (GET_MODE (op)));
1403 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1404 GET_MODE (XEXP (op, 0)));
1405 }
1406
1407 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1408 	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1409 GET_MODE_BITSIZE (N) - I bits.
1410 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1411 is similarly (zero_extend:M (subreg:O <X>)). */
1412 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1413 && GET_CODE (XEXP (op, 0)) == ASHIFT
1414 && CONST_INT_P (XEXP (op, 1))
1415 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1416 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1417 {
1418 machine_mode tmode
1419 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1420 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1421 gcc_assert (GET_MODE_BITSIZE (mode)
1422 > GET_MODE_BITSIZE (GET_MODE (op)));
1423 if (tmode != BLKmode)
1424 {
1425 rtx inner =
1426 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1427 if (inner)
1428 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1429 ? SIGN_EXTEND : ZERO_EXTEND,
1430 mode, inner, tmode);
1431 }
1432 }
1433
1434 #if defined(POINTERS_EXTEND_UNSIGNED)
1435 /* As we do not know which address space the pointer is referring to,
1436 we can do this only if the target does not support different pointer
1437 or address modes depending on the address space. */
1438 if (target_default_pointer_address_modes_p ()
1439 && ! POINTERS_EXTEND_UNSIGNED
1440 && mode == Pmode && GET_MODE (op) == ptr_mode
1441 && (CONSTANT_P (op)
1442 || (GET_CODE (op) == SUBREG
1443 && REG_P (SUBREG_REG (op))
1444 && REG_POINTER (SUBREG_REG (op))
1445 && GET_MODE (SUBREG_REG (op)) == Pmode))
1446 && !targetm.have_ptr_extend ())
1447 return convert_memory_address (Pmode, op);
1448 #endif
1449 break;
1450
1451 case ZERO_EXTEND:
1452 /* Check for a zero extension of a subreg of a promoted
1453 variable, where the promotion is zero-extended, and the
1454 target mode is the same as the variable's promotion. */
1455 if (GET_CODE (op) == SUBREG
1456 && SUBREG_PROMOTED_VAR_P (op)
1457 && SUBREG_PROMOTED_UNSIGNED_P (op)
1458 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1459 {
1460 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1461 if (temp)
1462 return temp;
1463 }
1464
1465 /* Extending a widening multiplication should be canonicalized to
1466 a wider widening multiplication. */
1467 if (GET_CODE (op) == MULT)
1468 {
1469 rtx lhs = XEXP (op, 0);
1470 rtx rhs = XEXP (op, 1);
1471 enum rtx_code lcode = GET_CODE (lhs);
1472 enum rtx_code rcode = GET_CODE (rhs);
1473
1474 /* Widening multiplies usually extend both operands, but sometimes
1475 they use a shift to extract a portion of a register. */
1476 if ((lcode == ZERO_EXTEND
1477 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1478 && (rcode == ZERO_EXTEND
1479 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1480 {
1481 machine_mode lmode = GET_MODE (lhs);
1482 machine_mode rmode = GET_MODE (rhs);
1483 int bits;
1484
1485 if (lcode == LSHIFTRT)
1486 /* Number of bits not shifted off the end. */
1487 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1488 else /* lcode == ZERO_EXTEND */
1489 /* Size of inner mode. */
1490 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1491
1492 if (rcode == LSHIFTRT)
1493 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1494 else /* rcode == ZERO_EXTEND */
1495 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1496
 1497 	     /* We can only widen multiplies if the result is mathematically
1498 equivalent. I.e. if overflow was impossible. */
1499 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1500 return simplify_gen_binary
1501 (MULT, mode,
1502 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1503 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1504 }
1505 }
1506
1507 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1508 if (GET_CODE (op) == ZERO_EXTEND)
1509 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1510 GET_MODE (XEXP (op, 0)));
1511
1512 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1513        is (zero_extend:M (subreg:O <X>)) if there is a mode with
1514 GET_MODE_PRECISION (N) - I bits. */
1515 if (GET_CODE (op) == LSHIFTRT
1516 && GET_CODE (XEXP (op, 0)) == ASHIFT
1517 && CONST_INT_P (XEXP (op, 1))
1518 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1519 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1520 {
1521 machine_mode tmode
1522 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1523 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1524 if (tmode != BLKmode)
1525 {
1526 rtx inner =
1527 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1528 if (inner)
1529 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1530 }
1531 }
1532
1533 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1534 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1535 of mode N. E.g.
1536 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1537 (and:SI (reg:SI) (const_int 63)). */
1538 if (GET_CODE (op) == SUBREG
1539 && GET_MODE_PRECISION (GET_MODE (op))
1540 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1541 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1542 <= HOST_BITS_PER_WIDE_INT
1543 && GET_MODE_PRECISION (mode)
1544 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1545 && subreg_lowpart_p (op)
1546 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1547 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1548 {
1549 if (GET_MODE_PRECISION (mode)
1550 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1551 return SUBREG_REG (op);
1552 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1553 GET_MODE (SUBREG_REG (op)));
1554 }
1555
1556 #if defined(POINTERS_EXTEND_UNSIGNED)
1557 /* As we do not know which address space the pointer is referring to,
1558 we can do this only if the target does not support different pointer
1559 or address modes depending on the address space. */
1560 if (target_default_pointer_address_modes_p ()
1561 && POINTERS_EXTEND_UNSIGNED > 0
1562 && mode == Pmode && GET_MODE (op) == ptr_mode
1563 && (CONSTANT_P (op)
1564 || (GET_CODE (op) == SUBREG
1565 && REG_P (SUBREG_REG (op))
1566 && REG_POINTER (SUBREG_REG (op))
1567 && GET_MODE (SUBREG_REG (op)) == Pmode))
1568 && !targetm.have_ptr_extend ())
1569 return convert_memory_address (Pmode, op);
1570 #endif
1571 break;
1572
1573 default:
1574 break;
1575 }
1576
1577 return 0;
1578 }
1579
1580 /* Try to compute the value of a unary operation CODE whose output mode is to
1581 be MODE with input operand OP whose mode was originally OP_MODE.
1582 Return zero if the value cannot be computed. */
1583 rtx
1584 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1585 rtx op, machine_mode op_mode)
1586 {
1587 unsigned int width = GET_MODE_PRECISION (mode);
1588
1589 if (code == VEC_DUPLICATE)
1590 {
1591 gcc_assert (VECTOR_MODE_P (mode));
1592 if (GET_MODE (op) != VOIDmode)
1593 {
1594 if (!VECTOR_MODE_P (GET_MODE (op)))
1595 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1596 else
1597 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1598 (GET_MODE (op)));
1599 }
1600 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1601 || GET_CODE (op) == CONST_VECTOR)
1602 {
1603 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1607
1608 if (GET_CODE (op) != CONST_VECTOR)
1609 for (i = 0; i < n_elts; i++)
1610 RTVEC_ELT (v, i) = op;
1611 else
1612 {
1613 machine_mode inmode = GET_MODE (op);
1614 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1615 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1616
1617 gcc_assert (in_n_elts < n_elts);
1618 gcc_assert ((n_elts % in_n_elts) == 0);
1619 for (i = 0; i < n_elts; i++)
1620 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1621 }
1622 return gen_rtx_CONST_VECTOR (mode, v);
1623 }
1624 }
1625
1626 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1627 {
1628 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1629 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1630 machine_mode opmode = GET_MODE (op);
1631 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1632 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1633 rtvec v = rtvec_alloc (n_elts);
1634 unsigned int i;
1635
1636 gcc_assert (op_n_elts == n_elts);
1637 for (i = 0; i < n_elts; i++)
1638 {
1639 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1640 CONST_VECTOR_ELT (op, i),
1641 GET_MODE_INNER (opmode));
1642 if (!x)
1643 return 0;
1644 RTVEC_ELT (v, i) = x;
1645 }
1646 return gen_rtx_CONST_VECTOR (mode, v);
1647 }
1648
1649 /* The order of these tests is critical so that, for example, we don't
1650 check the wrong mode (input vs. output) for a conversion operation,
1651 such as FIX. At some point, this should be simplified. */
1652
1653 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1654 {
1655 REAL_VALUE_TYPE d;
1656
1657 if (op_mode == VOIDmode)
1658 {
 1659 	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
 1660 	     the bits of the constant are significant, though this is
 1661 	     a dangerous assumption, as many times CONST_INTs are
1662 created and used with garbage in the bits outside of the
1663 precision of the implied mode of the const_int. */
1664 op_mode = MAX_MODE_INT;
1665 }
1666
1667 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1668 d = real_value_truncate (mode, d);
1669 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1670 }
1671 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1672 {
1673 REAL_VALUE_TYPE d;
1674
1675 if (op_mode == VOIDmode)
1676 {
 1677 	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
 1678 	     the bits of the constant are significant, though this is
 1679 	     a dangerous assumption, as many times CONST_INTs are
1680 created and used with garbage in the bits outside of the
1681 precision of the implied mode of the const_int. */
1682 op_mode = MAX_MODE_INT;
1683 }
1684
1685 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1686 d = real_value_truncate (mode, d);
1687 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1688 }
1689
1690 if (CONST_SCALAR_INT_P (op) && width > 0)
1691 {
1692 wide_int result;
1693 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1694 rtx_mode_t op0 = std::make_pair (op, imode);
1695 int int_value;
1696
1697 #if TARGET_SUPPORTS_WIDE_INT == 0
1698 /* This assert keeps the simplification from producing a result
 1699 	 that cannot be represented in a CONST_DOUBLE, but a lot of
 1700 	 upstream callers expect that this function never fails to
 1701 	 simplify something, so if you added this to the test
 1702 	 above, the code would die later anyway.  If this assert
1703 happens, you just need to make the port support wide int. */
1704 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1705 #endif
1706
1707 switch (code)
1708 {
1709 case NOT:
1710 result = wi::bit_not (op0);
1711 break;
1712
1713 case NEG:
1714 result = wi::neg (op0);
1715 break;
1716
1717 case ABS:
1718 result = wi::abs (op0);
1719 break;
1720
1721 case FFS:
1722 result = wi::shwi (wi::ffs (op0), mode);
1723 break;
1724
1725 case CLZ:
1726 if (wi::ne_p (op0, 0))
1727 int_value = wi::clz (op0);
1728 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1729 int_value = GET_MODE_PRECISION (mode);
1730 result = wi::shwi (int_value, mode);
1731 break;
1732
1733 case CLRSB:
1734 result = wi::shwi (wi::clrsb (op0), mode);
1735 break;
1736
1737 case CTZ:
1738 if (wi::ne_p (op0, 0))
1739 int_value = wi::ctz (op0);
1740 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1741 int_value = GET_MODE_PRECISION (mode);
1742 result = wi::shwi (int_value, mode);
1743 break;
1744
1745 case POPCOUNT:
1746 result = wi::shwi (wi::popcount (op0), mode);
1747 break;
1748
1749 case PARITY:
1750 result = wi::shwi (wi::parity (op0), mode);
1751 break;
1752
1753 case BSWAP:
1754 result = wide_int (op0).bswap ();
1755 break;
1756
1757 case TRUNCATE:
1758 case ZERO_EXTEND:
1759 result = wide_int::from (op0, width, UNSIGNED);
1760 break;
1761
1762 case SIGN_EXTEND:
1763 result = wide_int::from (op0, width, SIGNED);
1764 break;
1765
1766 case SQRT:
1767 default:
1768 return 0;
1769 }
1770
1771 return immed_wide_int_const (result, mode);
1772 }
1773
1774 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1775 && SCALAR_FLOAT_MODE_P (mode)
1776 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1777 {
1778 REAL_VALUE_TYPE d;
1779 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1780
1781 switch (code)
1782 {
1783 case SQRT:
1784 return 0;
1785 case ABS:
1786 d = real_value_abs (&d);
1787 break;
1788 case NEG:
1789 d = real_value_negate (&d);
1790 break;
1791 case FLOAT_TRUNCATE:
1792 d = real_value_truncate (mode, d);
1793 break;
1794 case FLOAT_EXTEND:
1795 /* All this does is change the mode, unless changing
1796 mode class. */
1797 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1798 real_convert (&d, mode, &d);
1799 break;
1800 case FIX:
1801 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1802 break;
1803 case NOT:
1804 {
1805 long tmp[4];
1806 int i;
1807
1808 real_to_target (tmp, &d, GET_MODE (op));
1809 for (i = 0; i < 4; i++)
1810 tmp[i] = ~tmp[i];
1811 real_from_target (&d, tmp, mode);
1812 break;
1813 }
1814 default:
1815 gcc_unreachable ();
1816 }
1817 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1818 }
1819 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1820 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1821 && GET_MODE_CLASS (mode) == MODE_INT
1822 && width > 0)
1823 {
1824 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1825 operators are intentionally left unspecified (to ease implementation
1826 by target backends), for consistency, this routine implements the
1827 same semantics for constant folding as used by the middle-end. */
1828
1829 /* This was formerly used only for non-IEEE float.
1830 eggert@twinsun.com says it is safe for IEEE also. */
1831 REAL_VALUE_TYPE x, t;
1832 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1833 wide_int wmax, wmin;
1834 /* This is part of the ABI of real_to_integer, but we check
1835 things before making this call. */
1836 bool fail;
1837
1838 switch (code)
1839 {
1840 case FIX:
1841 if (REAL_VALUE_ISNAN (x))
1842 return const0_rtx;
1843
1844 /* Test against the signed upper bound. */
1845 wmax = wi::max_value (width, SIGNED);
1846 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1847 if (REAL_VALUES_LESS (t, x))
1848 return immed_wide_int_const (wmax, mode);
1849
1850 /* Test against the signed lower bound. */
1851 wmin = wi::min_value (width, SIGNED);
1852 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1853 if (REAL_VALUES_LESS (x, t))
1854 return immed_wide_int_const (wmin, mode);
1855
1856 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1857 break;
1858
1859 case UNSIGNED_FIX:
1860 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1861 return const0_rtx;
1862
1863 /* Test against the unsigned upper bound. */
1864 wmax = wi::max_value (width, UNSIGNED);
1865 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1866 if (REAL_VALUES_LESS (t, x))
1867 return immed_wide_int_const (wmax, mode);
1868
1869 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1870 mode);
1871 break;
1872
1873 default:
1874 gcc_unreachable ();
1875 }
1876 }
1877
1878 return NULL_RTX;
1879 }
1880 \f
1881 /* Subroutine of simplify_binary_operation to simplify a binary operation
1882 CODE that can commute with byte swapping, with result mode MODE and
1883 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1884 Return zero if no simplification or canonicalization is possible. */
1885
1886 static rtx
1887 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1888 rtx op0, rtx op1)
1889 {
1890 rtx tem;
1891
1892 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
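/* For instance, in SImode (and (bswap x) (const_int 0xff)) would become
   (bswap (and x (const_int 0xff000000))), exposing the constant to
   further simplification. */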
1893 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1894 {
1895 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1896 simplify_gen_unary (BSWAP, mode, op1, mode));
1897 return simplify_gen_unary (BSWAP, mode, tem, mode);
1898 }
1899
1900 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1901 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1902 {
1903 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1904 return simplify_gen_unary (BSWAP, mode, tem, mode);
1905 }
1906
1907 return NULL_RTX;
1908 }
1909
1910 /* Subroutine of simplify_binary_operation to simplify a commutative,
1911 associative binary operation CODE with result mode MODE, operating
1912 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1913 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1914 canonicalization is possible. */
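/* For example, (plus (plus (reg) (const_int 2)) (const_int 3)) is
   reassociated so that the constants meet and combine, yielding
   (plus (reg) (const_int 5)). */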
1915
1916 static rtx
1917 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1918 rtx op0, rtx op1)
1919 {
1920 rtx tem;
1921
1922 /* Linearize the operator to the left. */
1923 if (GET_CODE (op1) == code)
1924 {
1925 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1926 if (GET_CODE (op0) == code)
1927 {
1928 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1929 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1930 }
1931
1932 /* "a op (b op c)" becomes "(b op c) op a". */
1933 if (! swap_commutative_operands_p (op1, op0))
1934 return simplify_gen_binary (code, mode, op1, op0);
1935
1936 std::swap (op0, op1);
1937 }
1938
1939 if (GET_CODE (op0) == code)
1940 {
1941 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1942 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1943 {
1944 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1945 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1946 }
1947
1948 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1949 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1950 if (tem != 0)
1951 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1952
1953 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1954 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1955 if (tem != 0)
1956 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1957 }
1958
1959 return 0;
1960 }
1961
1962
1963 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1964 and OP1. Return 0 if no simplification is possible.
1965
1966 Don't use this for relational operations such as EQ or LT.
1967 Use simplify_relational_operation instead. */
1968 rtx
1969 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1970 rtx op0, rtx op1)
1971 {
1972 rtx trueop0, trueop1;
1973 rtx tem;
1974
1975 /* Relational operations don't work here. We must know the mode
1976 of the operands in order to do the comparison correctly.
1977 Assuming a full word can give incorrect results.
1978 Consider comparing 128 with -128 in QImode. */
1979 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1980 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1981
1982 /* Make sure the constant is second. */
1983 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1984 && swap_commutative_operands_p (op0, op1))
1985 std::swap (op0, op1);
1986
1987 trueop0 = avoid_constant_pool_reference (op0);
1988 trueop1 = avoid_constant_pool_reference (op1);
1989
1990 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1991 if (tem)
1992 return tem;
1993 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1994 }
1995
1996 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1997 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1998 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1999 actual constants. */
2000
2001 static rtx
2002 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2003 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2004 {
2005 rtx tem, reversed, opleft, opright;
2006 HOST_WIDE_INT val;
2007 unsigned int width = GET_MODE_PRECISION (mode);
2008
2009 /* Even if we can't compute a constant result,
2010 there are some cases worth simplifying. */
2011
2012 switch (code)
2013 {
2014 case PLUS:
2015 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2016 when x is NaN, infinite, or finite and nonzero. They aren't
2017 when x is -0 and the rounding mode is not towards -infinity,
2018 since (-0) + 0 is then 0. */
2019 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2020 return op0;
2021
2022 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2023 transformations are safe even for IEEE. */
2024 if (GET_CODE (op0) == NEG)
2025 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2026 else if (GET_CODE (op1) == NEG)
2027 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2028
2029 /* (~a) + 1 -> -a */
2030 if (INTEGRAL_MODE_P (mode)
2031 && GET_CODE (op0) == NOT
2032 && trueop1 == const1_rtx)
2033 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2034
2035 /* Handle both-operands-constant cases. We can only add
2036 CONST_INTs to constants since the sum of relocatable symbols
2037 can't be handled by most assemblers. Don't add CONST_INT
2038 to CONST_INT since overflow won't be computed properly if wider
2039 than HOST_BITS_PER_WIDE_INT. */
2040
2041 if ((GET_CODE (op0) == CONST
2042 || GET_CODE (op0) == SYMBOL_REF
2043 || GET_CODE (op0) == LABEL_REF)
2044 && CONST_INT_P (op1))
2045 return plus_constant (mode, op0, INTVAL (op1));
2046 else if ((GET_CODE (op1) == CONST
2047 || GET_CODE (op1) == SYMBOL_REF
2048 || GET_CODE (op1) == LABEL_REF)
2049 && CONST_INT_P (op0))
2050 return plus_constant (mode, op1, INTVAL (op0));
2051
2052 /* See if this is something like X * C + X or vice versa or
2053 if the multiplication is written as a shift. If so, we can
2054 distribute and make a new multiply or shift that combines
2055 the coefficients. But don't make something more expensive
2056 than we had before. */
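/* For instance, (plus (mult x (const_int 3)) x) can become
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) can
   become (mult x (const_int 5)), provided the cost check below does
   not reject the new form. */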
2057
2058 if (SCALAR_INT_MODE_P (mode))
2059 {
2060 rtx lhs = op0, rhs = op1;
2061
2062 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2063 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2064
2065 if (GET_CODE (lhs) == NEG)
2066 {
2067 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2068 lhs = XEXP (lhs, 0);
2069 }
2070 else if (GET_CODE (lhs) == MULT
2071 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2072 {
2073 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2074 lhs = XEXP (lhs, 0);
2075 }
2076 else if (GET_CODE (lhs) == ASHIFT
2077 && CONST_INT_P (XEXP (lhs, 1))
2078 && INTVAL (XEXP (lhs, 1)) >= 0
2079 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2080 {
2081 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2082 GET_MODE_PRECISION (mode));
2083 lhs = XEXP (lhs, 0);
2084 }
2085
2086 if (GET_CODE (rhs) == NEG)
2087 {
2088 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2089 rhs = XEXP (rhs, 0);
2090 }
2091 else if (GET_CODE (rhs) == MULT
2092 && CONST_INT_P (XEXP (rhs, 1)))
2093 {
2094 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2095 rhs = XEXP (rhs, 0);
2096 }
2097 else if (GET_CODE (rhs) == ASHIFT
2098 && CONST_INT_P (XEXP (rhs, 1))
2099 && INTVAL (XEXP (rhs, 1)) >= 0
2100 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2101 {
2102 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2103 GET_MODE_PRECISION (mode));
2104 rhs = XEXP (rhs, 0);
2105 }
2106
2107 if (rtx_equal_p (lhs, rhs))
2108 {
2109 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2110 rtx coeff;
2111 bool speed = optimize_function_for_speed_p (cfun);
2112
2113 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2114
2115 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2116 return (set_src_cost (tem, mode, speed)
2117 <= set_src_cost (orig, mode, speed) ? tem : 0);
2118 }
2119 }
2120
2121 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
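/* For instance, in QImode (plus (xor x (const_int 1)) (const_int -128))
   becomes (xor x (const_int -127)), since adding the sign bit is the
   same as XORing it. */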
2122 if (CONST_SCALAR_INT_P (op1)
2123 && GET_CODE (op0) == XOR
2124 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2125 && mode_signbit_p (mode, op1))
2126 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2127 simplify_gen_binary (XOR, mode, op1,
2128 XEXP (op0, 1)));
2129
2130 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2131 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2132 && GET_CODE (op0) == MULT
2133 && GET_CODE (XEXP (op0, 0)) == NEG)
2134 {
2135 rtx in1, in2;
2136
2137 in1 = XEXP (XEXP (op0, 0), 0);
2138 in2 = XEXP (op0, 1);
2139 return simplify_gen_binary (MINUS, mode, op1,
2140 simplify_gen_binary (MULT, mode,
2141 in1, in2));
2142 }
2143
2144 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2145 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2146 is 1. */
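/* For instance, with STORE_FLAG_VALUE == 1,
   (plus (eq a b) (const_int -1)) becomes (neg (ne a b)). */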
2147 if (COMPARISON_P (op0)
2148 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2149 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2150 && (reversed = reversed_comparison (op0, mode)))
2151 return
2152 simplify_gen_unary (NEG, mode, reversed, mode);
2153
2154 /* If one of the operands is a PLUS or a MINUS, see if we can
2155 simplify this by the associative law.
2156 Don't use the associative law for floating point.
2157 The inaccuracy makes it nonassociative,
2158 and subtle programs can break if operations are associated. */
2159
2160 if (INTEGRAL_MODE_P (mode)
2161 && (plus_minus_operand_p (op0)
2162 || plus_minus_operand_p (op1))
2163 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2164 return tem;
2165
2166 /* Reassociate floating point addition only when the user
2167 specifies associative math operations. */
2168 if (FLOAT_MODE_P (mode)
2169 && flag_associative_math)
2170 {
2171 tem = simplify_associative_operation (code, mode, op0, op1);
2172 if (tem)
2173 return tem;
2174 }
2175 break;
2176
2177 case COMPARE:
2178 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2179 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2180 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2181 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2182 {
2183 rtx xop00 = XEXP (op0, 0);
2184 rtx xop10 = XEXP (op1, 0);
2185
2186 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2187 return xop00;
2188
2189 if (REG_P (xop00) && REG_P (xop10)
2190 && GET_MODE (xop00) == GET_MODE (xop10)
2191 && REGNO (xop00) == REGNO (xop10)
2192 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2193 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2194 return xop00;
2195 }
2196 break;
2197
2198 case MINUS:
2199 /* We can't assume x-x is 0 even with non-IEEE floating point,
2200 but since it is zero except in very strange circumstances, we
2201 will treat it as zero with -ffinite-math-only. */
2202 if (rtx_equal_p (trueop0, trueop1)
2203 && ! side_effects_p (op0)
2204 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2205 return CONST0_RTX (mode);
2206
2207 /* Change subtraction from zero into negation. (0 - x) is the
2208 same as -x when x is NaN, infinite, or finite and nonzero.
2209 But if the mode has signed zeros, and does not round towards
2210 -infinity, then 0 - 0 is 0, not -0. */
2211 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2212 return simplify_gen_unary (NEG, mode, op1, mode);
2213
2214 /* (-1 - a) is ~a. */
2215 if (trueop0 == constm1_rtx)
2216 return simplify_gen_unary (NOT, mode, op1, mode);
2217
2218 /* Subtracting 0 has no effect unless the mode has signed zeros
2219 and supports rounding towards -infinity. In such a case,
2220 0 - 0 is -0. */
2221 if (!(HONOR_SIGNED_ZEROS (mode)
2222 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2223 && trueop1 == CONST0_RTX (mode))
2224 return op0;
2225
2226 /* See if this is something like X * C - X or vice versa or
2227 if the multiplication is written as a shift. If so, we can
2228 distribute and make a new multiply, shift, or maybe just
2229 have X (if C is 2 in the example above). But don't make
2230 something more expensive than we had before. */
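/* For instance, (minus (mult x (const_int 3)) x) can become
   (mult x (const_int 2)), and (minus x (ashift x (const_int 2))) can
   become (mult x (const_int -3)), again subject to the cost check
   below. */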
2231
2232 if (SCALAR_INT_MODE_P (mode))
2233 {
2234 rtx lhs = op0, rhs = op1;
2235
2236 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2237 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2238
2239 if (GET_CODE (lhs) == NEG)
2240 {
2241 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2242 lhs = XEXP (lhs, 0);
2243 }
2244 else if (GET_CODE (lhs) == MULT
2245 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2246 {
2247 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2248 lhs = XEXP (lhs, 0);
2249 }
2250 else if (GET_CODE (lhs) == ASHIFT
2251 && CONST_INT_P (XEXP (lhs, 1))
2252 && INTVAL (XEXP (lhs, 1)) >= 0
2253 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2254 {
2255 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2256 GET_MODE_PRECISION (mode));
2257 lhs = XEXP (lhs, 0);
2258 }
2259
2260 if (GET_CODE (rhs) == NEG)
2261 {
2262 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2263 rhs = XEXP (rhs, 0);
2264 }
2265 else if (GET_CODE (rhs) == MULT
2266 && CONST_INT_P (XEXP (rhs, 1)))
2267 {
2268 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2269 rhs = XEXP (rhs, 0);
2270 }
2271 else if (GET_CODE (rhs) == ASHIFT
2272 && CONST_INT_P (XEXP (rhs, 1))
2273 && INTVAL (XEXP (rhs, 1)) >= 0
2274 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2275 {
2276 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2277 GET_MODE_PRECISION (mode));
2278 negcoeff1 = -negcoeff1;
2279 rhs = XEXP (rhs, 0);
2280 }
2281
2282 if (rtx_equal_p (lhs, rhs))
2283 {
2284 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2285 rtx coeff;
2286 bool speed = optimize_function_for_speed_p (cfun);
2287
2288 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2289
2290 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2291 return (set_src_cost (tem, mode, speed)
2292 <= set_src_cost (orig, mode, speed) ? tem : 0);
2293 }
2294 }
2295
2296 /* (a - (-b)) -> (a + b). True even for IEEE. */
2297 if (GET_CODE (op1) == NEG)
2298 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2299
2300 /* (-x - c) may be simplified as (-c - x). */
2301 if (GET_CODE (op0) == NEG
2302 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2303 {
2304 tem = simplify_unary_operation (NEG, mode, op1, mode);
2305 if (tem)
2306 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2307 }
2308
2309 /* Don't let a relocatable value get a negative coeff. */
2310 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2311 return simplify_gen_binary (PLUS, mode,
2312 op0,
2313 neg_const_int (mode, op1));
2314
2315 /* (x - (x & y)) -> (x & ~y) */
2316 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2317 {
2318 if (rtx_equal_p (op0, XEXP (op1, 0)))
2319 {
2320 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2321 GET_MODE (XEXP (op1, 1)));
2322 return simplify_gen_binary (AND, mode, op0, tem);
2323 }
2324 if (rtx_equal_p (op0, XEXP (op1, 1)))
2325 {
2326 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2327 GET_MODE (XEXP (op1, 0)));
2328 return simplify_gen_binary (AND, mode, op0, tem);
2329 }
2330 }
2331
2332 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2333 by reversing the comparison code if valid. */
2334 if (STORE_FLAG_VALUE == 1
2335 && trueop0 == const1_rtx
2336 && COMPARISON_P (op1)
2337 && (reversed = reversed_comparison (op1, mode)))
2338 return reversed;
2339
2340 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2341 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2342 && GET_CODE (op1) == MULT
2343 && GET_CODE (XEXP (op1, 0)) == NEG)
2344 {
2345 rtx in1, in2;
2346
2347 in1 = XEXP (XEXP (op1, 0), 0);
2348 in2 = XEXP (op1, 1);
2349 return simplify_gen_binary (PLUS, mode,
2350 simplify_gen_binary (MULT, mode,
2351 in1, in2),
2352 op0);
2353 }
2354
2355 /* Canonicalize (minus (neg A) (mult B C)) to
2356 (minus (mult (neg B) C) A). */
2357 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2358 && GET_CODE (op1) == MULT
2359 && GET_CODE (op0) == NEG)
2360 {
2361 rtx in1, in2;
2362
2363 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2364 in2 = XEXP (op1, 1);
2365 return simplify_gen_binary (MINUS, mode,
2366 simplify_gen_binary (MULT, mode,
2367 in1, in2),
2368 XEXP (op0, 0));
2369 }
2370
2371 /* If one of the operands is a PLUS or a MINUS, see if we can
2372 simplify this by the associative law. This will, for example,
2373 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2374 Don't use the associative law for floating point.
2375 The inaccuracy makes it nonassociative,
2376 and subtle programs can break if operations are associated. */
2377
2378 if (INTEGRAL_MODE_P (mode)
2379 && (plus_minus_operand_p (op0)
2380 || plus_minus_operand_p (op1))
2381 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2382 return tem;
2383 break;
2384
2385 case MULT:
2386 if (trueop1 == constm1_rtx)
2387 return simplify_gen_unary (NEG, mode, op0, mode);
2388
2389 if (GET_CODE (op0) == NEG)
2390 {
2391 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2392 /* If op1 is a MULT as well and simplify_unary_operation
2393 just moved the NEG to the second operand, simplify_gen_binary
2394 below could, through simplify_associative_operation, move
2395 the NEG around again and recurse endlessly. */
2396 if (temp
2397 && GET_CODE (op1) == MULT
2398 && GET_CODE (temp) == MULT
2399 && XEXP (op1, 0) == XEXP (temp, 0)
2400 && GET_CODE (XEXP (temp, 1)) == NEG
2401 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2402 temp = NULL_RTX;
2403 if (temp)
2404 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2405 }
2406 if (GET_CODE (op1) == NEG)
2407 {
2408 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2409 /* If op0 is a MULT as well and simplify_unary_operation
2410 just moved the NEG to the second operand, simplify_gen_binary
2411 below could, through simplify_associative_operation, move
2412 the NEG around again and recurse endlessly. */
2413 if (temp
2414 && GET_CODE (op0) == MULT
2415 && GET_CODE (temp) == MULT
2416 && XEXP (op0, 0) == XEXP (temp, 0)
2417 && GET_CODE (XEXP (temp, 1)) == NEG
2418 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2419 temp = NULL_RTX;
2420 if (temp)
2421 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2422 }
2423
2424 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2425 x is NaN, since x * 0 is then also NaN. Nor is it valid
2426 when the mode has signed zeros, since multiplying a negative
2427 number by 0 will give -0, not 0. */
2428 if (!HONOR_NANS (mode)
2429 && !HONOR_SIGNED_ZEROS (mode)
2430 && trueop1 == CONST0_RTX (mode)
2431 && ! side_effects_p (op0))
2432 return op1;
2433
2434 /* In IEEE floating point, x*1 is not equivalent to x for
2435 signalling NaNs. */
2436 if (!HONOR_SNANS (mode)
2437 && trueop1 == CONST1_RTX (mode))
2438 return op0;
2439
2440 /* Convert multiply by constant power of two into shift. */
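/* For instance, (mult x (const_int 8)) becomes
   (ashift x (const_int 3)). */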
2441 if (CONST_SCALAR_INT_P (trueop1))
2442 {
2443 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2444 if (val >= 0)
2445 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2446 }
2447
2448 /* x*2 is x+x and x*(-1) is -x */
2449 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2450 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2451 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2452 && GET_MODE (op0) == mode)
2453 {
2454 REAL_VALUE_TYPE d;
2455 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2456
2457 if (REAL_VALUES_EQUAL (d, dconst2))
2458 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2459
2460 if (!HONOR_SNANS (mode)
2461 && REAL_VALUES_EQUAL (d, dconstm1))
2462 return simplify_gen_unary (NEG, mode, op0, mode);
2463 }
2464
2465 /* Optimize -x * -x as x * x. */
2466 if (FLOAT_MODE_P (mode)
2467 && GET_CODE (op0) == NEG
2468 && GET_CODE (op1) == NEG
2469 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2470 && !side_effects_p (XEXP (op0, 0)))
2471 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2472
2473 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2474 if (SCALAR_FLOAT_MODE_P (mode)
2475 && GET_CODE (op0) == ABS
2476 && GET_CODE (op1) == ABS
2477 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2478 && !side_effects_p (XEXP (op0, 0)))
2479 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2480
2481 /* Reassociate multiplication, but for floating point MULTs
2482 only when the user specifies unsafe math optimizations. */
2483 if (! FLOAT_MODE_P (mode)
2484 || flag_unsafe_math_optimizations)
2485 {
2486 tem = simplify_associative_operation (code, mode, op0, op1);
2487 if (tem)
2488 return tem;
2489 }
2490 break;
2491
2492 case IOR:
2493 if (trueop1 == CONST0_RTX (mode))
2494 return op0;
2495 if (INTEGRAL_MODE_P (mode)
2496 && trueop1 == CONSTM1_RTX (mode)
2497 && !side_effects_p (op0))
2498 return op1;
2499 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2500 return op0;
2501 /* A | (~A) -> -1 */
2502 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2503 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2504 && ! side_effects_p (op0)
2505 && SCALAR_INT_MODE_P (mode))
2506 return constm1_rtx;
2507
2508 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2509 if (CONST_INT_P (op1)
2510 && HWI_COMPUTABLE_MODE_P (mode)
2511 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2512 && !side_effects_p (op0))
2513 return op1;
2514
2515 /* Canonicalize (X & C1) | C2. */
2516 if (GET_CODE (op0) == AND
2517 && CONST_INT_P (trueop1)
2518 && CONST_INT_P (XEXP (op0, 1)))
2519 {
2520 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2521 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2522 HOST_WIDE_INT c2 = INTVAL (trueop1);
2523
2524 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2525 if ((c1 & c2) == c1
2526 && !side_effects_p (XEXP (op0, 0)))
2527 return trueop1;
2528
2529 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2530 if (((c1|c2) & mask) == mask)
2531 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2532
2533 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2534 if (((c1 & ~c2) & mask) != (c1 & mask))
2535 {
2536 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2537 gen_int_mode (c1 & ~c2, mode));
2538 return simplify_gen_binary (IOR, mode, tem, op1);
2539 }
2540 }
2541
2542 /* Convert (A & B) | A to A. */
2543 if (GET_CODE (op0) == AND
2544 && (rtx_equal_p (XEXP (op0, 0), op1)
2545 || rtx_equal_p (XEXP (op0, 1), op1))
2546 && ! side_effects_p (XEXP (op0, 0))
2547 && ! side_effects_p (XEXP (op0, 1)))
2548 return op1;
2549
2550 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2551 mode size to (rotate A CX). */
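/* For instance, in SImode (ior (ashift x (const_int 8))
   (lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)). */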
2552
2553 if (GET_CODE (op1) == ASHIFT
2554 || GET_CODE (op1) == SUBREG)
2555 {
2556 opleft = op1;
2557 opright = op0;
2558 }
2559 else
2560 {
2561 opright = op1;
2562 opleft = op0;
2563 }
2564
2565 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2566 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2567 && CONST_INT_P (XEXP (opleft, 1))
2568 && CONST_INT_P (XEXP (opright, 1))
2569 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2570 == GET_MODE_PRECISION (mode)))
2571 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2572
2573 /* Same, but for ashift that has been "simplified" to a wider mode
2574 by simplify_shift_const. */
2575
2576 if (GET_CODE (opleft) == SUBREG
2577 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2578 && GET_CODE (opright) == LSHIFTRT
2579 && GET_CODE (XEXP (opright, 0)) == SUBREG
2580 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2581 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2582 && (GET_MODE_SIZE (GET_MODE (opleft))
2583 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2584 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2585 SUBREG_REG (XEXP (opright, 0)))
2586 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2587 && CONST_INT_P (XEXP (opright, 1))
2588 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2589 == GET_MODE_PRECISION (mode)))
2590 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2591 XEXP (SUBREG_REG (opleft), 1));
2592
2593 /* If we have (ior (and X C1) C2), simplify this by making
2594 C1 as small as possible if C1 actually changes. */
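/* For instance, (ior (and x (const_int 0xff)) (const_int 0x0f))
   becomes (ior (and x (const_int 0xf0)) (const_int 0x0f)). */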
2595 if (CONST_INT_P (op1)
2596 && (HWI_COMPUTABLE_MODE_P (mode)
2597 || INTVAL (op1) > 0)
2598 && GET_CODE (op0) == AND
2599 && CONST_INT_P (XEXP (op0, 1))
2600 && CONST_INT_P (op1)
2601 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2602 {
2603 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2604 gen_int_mode (UINTVAL (XEXP (op0, 1))
2605 & ~UINTVAL (op1),
2606 mode));
2607 return simplify_gen_binary (IOR, mode, tmp, op1);
2608 }
2609
2610 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2611 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2612 the PLUS does not affect any of the bits in OP1: then we can do
2613 the IOR as a PLUS and we can associate. This is valid if OP1
2614 can be safely shifted left C bits. */
2615 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2616 && GET_CODE (XEXP (op0, 0)) == PLUS
2617 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2618 && CONST_INT_P (XEXP (op0, 1))
2619 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2620 {
2621 int count = INTVAL (XEXP (op0, 1));
2622 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2623
2624 if (mask >> count == INTVAL (trueop1)
2625 && trunc_int_for_mode (mask, mode) == mask
2626 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2627 return simplify_gen_binary (ASHIFTRT, mode,
2628 plus_constant (mode, XEXP (op0, 0),
2629 mask),
2630 XEXP (op0, 1));
2631 }
2632
2633 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2634 if (tem)
2635 return tem;
2636
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2638 if (tem)
2639 return tem;
2640 break;
2641
2642 case XOR:
2643 if (trueop1 == CONST0_RTX (mode))
2644 return op0;
2645 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2646 return simplify_gen_unary (NOT, mode, op0, mode);
2647 if (rtx_equal_p (trueop0, trueop1)
2648 && ! side_effects_p (op0)
2649 && GET_MODE_CLASS (mode) != MODE_CC)
2650 return CONST0_RTX (mode);
2651
2652 /* Canonicalize XOR of the most significant bit to PLUS. */
2653 if (CONST_SCALAR_INT_P (op1)
2654 && mode_signbit_p (mode, op1))
2655 return simplify_gen_binary (PLUS, mode, op0, op1);
2656 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2657 if (CONST_SCALAR_INT_P (op1)
2658 && GET_CODE (op0) == PLUS
2659 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2660 && mode_signbit_p (mode, XEXP (op0, 1)))
2661 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2662 simplify_gen_binary (XOR, mode, op1,
2663 XEXP (op0, 1)));
2664
2665 /* If we are XORing two things that have no bits in common,
2666 convert them into an IOR. This helps to detect rotation encoded
2667 using those methods and possibly other simplifications. */
2668
2669 if (HWI_COMPUTABLE_MODE_P (mode)
2670 && (nonzero_bits (op0, mode)
2671 & nonzero_bits (op1, mode)) == 0)
2672 return (simplify_gen_binary (IOR, mode, op0, op1));
2673
2674 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2675 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2676 (NOT y). */
2677 {
2678 int num_negated = 0;
2679
2680 if (GET_CODE (op0) == NOT)
2681 num_negated++, op0 = XEXP (op0, 0);
2682 if (GET_CODE (op1) == NOT)
2683 num_negated++, op1 = XEXP (op1, 0);
2684
2685 if (num_negated == 2)
2686 return simplify_gen_binary (XOR, mode, op0, op1);
2687 else if (num_negated == 1)
2688 return simplify_gen_unary (NOT, mode,
2689 simplify_gen_binary (XOR, mode, op0, op1),
2690 mode);
2691 }
2692
2693 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2694 correspond to a machine insn or result in further simplifications
2695 if B is a constant. */
2696
2697 if (GET_CODE (op0) == AND
2698 && rtx_equal_p (XEXP (op0, 1), op1)
2699 && ! side_effects_p (op1))
2700 return simplify_gen_binary (AND, mode,
2701 simplify_gen_unary (NOT, mode,
2702 XEXP (op0, 0), mode),
2703 op1);
2704
2705 else if (GET_CODE (op0) == AND
2706 && rtx_equal_p (XEXP (op0, 0), op1)
2707 && ! side_effects_p (op1))
2708 return simplify_gen_binary (AND, mode,
2709 simplify_gen_unary (NOT, mode,
2710 XEXP (op0, 1), mode),
2711 op1);
2712
2713 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2714 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2715 out bits inverted twice and not set by C. Similarly, given
2716 (xor (and (xor A B) C) D), simplify without inverting C in
2717 the xor operand: (xor (and A C) (B&C)^D).
2718 */
2719 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2720 && GET_CODE (XEXP (op0, 0)) == XOR
2721 && CONST_INT_P (op1)
2722 && CONST_INT_P (XEXP (op0, 1))
2723 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2724 {
2725 enum rtx_code op = GET_CODE (op0);
2726 rtx a = XEXP (XEXP (op0, 0), 0);
2727 rtx b = XEXP (XEXP (op0, 0), 1);
2728 rtx c = XEXP (op0, 1);
2729 rtx d = op1;
2730 HOST_WIDE_INT bval = INTVAL (b);
2731 HOST_WIDE_INT cval = INTVAL (c);
2732 HOST_WIDE_INT dval = INTVAL (d);
2733 HOST_WIDE_INT xcval;
2734
2735 if (op == IOR)
2736 xcval = ~cval;
2737 else
2738 xcval = cval;
2739
2740 return simplify_gen_binary (XOR, mode,
2741 simplify_gen_binary (op, mode, a, c),
2742 gen_int_mode ((bval & xcval) ^ dval,
2743 mode));
2744 }
2745
2746 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2747 we can transform like this:
2748 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2749 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2750 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2751 Attempt a few simplifications when B and C are both constants. */
2752 if (GET_CODE (op0) == AND
2753 && CONST_INT_P (op1)
2754 && CONST_INT_P (XEXP (op0, 1)))
2755 {
2756 rtx a = XEXP (op0, 0);
2757 rtx b = XEXP (op0, 1);
2758 rtx c = op1;
2759 HOST_WIDE_INT bval = INTVAL (b);
2760 HOST_WIDE_INT cval = INTVAL (c);
2761
2762 /* Instead of computing ~A&C, we compute its negation,
2763 A|~C. If that yields -1, ~A&C is zero, so we can
2764 optimize for sure. If it does not simplify, we still try
2765 to compute ~A&C below, but since that always allocates
2766 RTL, we don't try that before committing to returning a
2767 simplified expression. */
2768 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2769 GEN_INT (~cval));
2770
2771 if ((~cval & bval) == 0)
2772 {
2773 rtx na_c = NULL_RTX;
2774 if (n_na_c)
2775 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2776 else
2777 {
2778 /* If ~A does not simplify, don't bother: we don't
2779 want to simplify 2 operations into 3, and if na_c
2780 were to simplify with na, n_na_c would have
2781 simplified as well. */
2782 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2783 if (na)
2784 na_c = simplify_gen_binary (AND, mode, na, c);
2785 }
2786
2787 /* Try to simplify ~A&C | ~B&C. */
2788 if (na_c != NULL_RTX)
2789 return simplify_gen_binary (IOR, mode, na_c,
2790 gen_int_mode (~bval & cval, mode));
2791 }
2792 else
2793 {
2794 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2795 if (n_na_c == CONSTM1_RTX (mode))
2796 {
2797 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2798 gen_int_mode (~cval & bval,
2799 mode));
2800 return simplify_gen_binary (IOR, mode, a_nc_b,
2801 gen_int_mode (~bval & cval,
2802 mode));
2803 }
2804 }
2805 }
2806
2807 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2808 comparison if STORE_FLAG_VALUE is 1. */
2809 if (STORE_FLAG_VALUE == 1
2810 && trueop1 == const1_rtx
2811 && COMPARISON_P (op0)
2812 && (reversed = reversed_comparison (op0, mode)))
2813 return reversed;
2814
2815 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2816 is (lt foo (const_int 0)), so we can perform the above
2817 simplification if STORE_FLAG_VALUE is 1. */
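/* For instance, in SImode with STORE_FLAG_VALUE == 1,
   (xor (lshiftrt x (const_int 31)) (const_int 1)) becomes
   (ge x (const_int 0)). */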
2818
2819 if (STORE_FLAG_VALUE == 1
2820 && trueop1 == const1_rtx
2821 && GET_CODE (op0) == LSHIFTRT
2822 && CONST_INT_P (XEXP (op0, 1))
2823 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2824 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2825
2826 /* (xor (comparison foo bar) (const_int sign-bit))
2827 when STORE_FLAG_VALUE is the sign bit. */
2828 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2829 && trueop1 == const_true_rtx
2830 && COMPARISON_P (op0)
2831 && (reversed = reversed_comparison (op0, mode)))
2832 return reversed;
2833
2834 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2835 if (tem)
2836 return tem;
2837
2838 tem = simplify_associative_operation (code, mode, op0, op1);
2839 if (tem)
2840 return tem;
2841 break;
2842
2843 case AND:
2844 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2845 return trueop1;
2846 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2847 return op0;
2848 if (HWI_COMPUTABLE_MODE_P (mode))
2849 {
2850 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2851 HOST_WIDE_INT nzop1;
2852 if (CONST_INT_P (trueop1))
2853 {
2854 HOST_WIDE_INT val1 = INTVAL (trueop1);
2855 /* If we are turning off bits already known off in OP0, we need
2856 not do an AND. */
2857 if ((nzop0 & ~val1) == 0)
2858 return op0;
2859 }
2860 nzop1 = nonzero_bits (trueop1, mode);
2861 /* If we are clearing all the nonzero bits, the result is zero. */
2862 if ((nzop1 & nzop0) == 0
2863 && !side_effects_p (op0) && !side_effects_p (op1))
2864 return CONST0_RTX (mode);
2865 }
2866 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2867 && GET_MODE_CLASS (mode) != MODE_CC)
2868 return op0;
2869 /* A & (~A) -> 0 */
2870 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2871 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2872 && ! side_effects_p (op0)
2873 && GET_MODE_CLASS (mode) != MODE_CC)
2874 return CONST0_RTX (mode);
2875
2876 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2877 there are no nonzero bits of C outside of X's mode. */
2878 if ((GET_CODE (op0) == SIGN_EXTEND
2879 || GET_CODE (op0) == ZERO_EXTEND)
2880 && CONST_INT_P (trueop1)
2881 && HWI_COMPUTABLE_MODE_P (mode)
2882 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2883 & UINTVAL (trueop1)) == 0)
2884 {
2885 machine_mode imode = GET_MODE (XEXP (op0, 0));
2886 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2887 gen_int_mode (INTVAL (trueop1),
2888 imode));
2889 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2890 }
2891
2892 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2893 we might be able to further simplify the AND with X and potentially
2894 remove the truncation altogether. */
2895 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2896 {
2897 rtx x = XEXP (op0, 0);
2898 machine_mode xmode = GET_MODE (x);
2899 tem = simplify_gen_binary (AND, xmode, x,
2900 gen_int_mode (INTVAL (trueop1), xmode));
2901 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2902 }
2903
2904 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2905 if (GET_CODE (op0) == IOR
2906 && CONST_INT_P (trueop1)
2907 && CONST_INT_P (XEXP (op0, 1)))
2908 {
2909 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2910 return simplify_gen_binary (IOR, mode,
2911 simplify_gen_binary (AND, mode,
2912 XEXP (op0, 0), op1),
2913 gen_int_mode (tmp, mode));
2914 }
2915
2916 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2917 insn (and may simplify more). */
2918 if (GET_CODE (op0) == XOR
2919 && rtx_equal_p (XEXP (op0, 0), op1)
2920 && ! side_effects_p (op1))
2921 return simplify_gen_binary (AND, mode,
2922 simplify_gen_unary (NOT, mode,
2923 XEXP (op0, 1), mode),
2924 op1);
2925
2926 if (GET_CODE (op0) == XOR
2927 && rtx_equal_p (XEXP (op0, 1), op1)
2928 && ! side_effects_p (op1))
2929 return simplify_gen_binary (AND, mode,
2930 simplify_gen_unary (NOT, mode,
2931 XEXP (op0, 0), mode),
2932 op1);
2933
2934 /* Similarly for (~(A ^ B)) & A. */
2935 if (GET_CODE (op0) == NOT
2936 && GET_CODE (XEXP (op0, 0)) == XOR
2937 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2940
2941 if (GET_CODE (op0) == NOT
2942 && GET_CODE (XEXP (op0, 0)) == XOR
2943 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2944 && ! side_effects_p (op1))
2945 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2946
2947 /* Convert (A | B) & A to A. */
2948 if (GET_CODE (op0) == IOR
2949 && (rtx_equal_p (XEXP (op0, 0), op1)
2950 || rtx_equal_p (XEXP (op0, 1), op1))
2951 && ! side_effects_p (XEXP (op0, 0))
2952 && ! side_effects_p (XEXP (op0, 1)))
2953 return op1;
2954
2955 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2956 ((A & N) + B) & M -> (A + B) & M
2957 Similarly if (N & M) == 0,
2958 ((A | N) + B) & M -> (A + B) & M
2959 and for - instead of + and/or ^ instead of |.
2960 Also, if (N & M) == 0, then
2961 (A +- N) & M -> A & M. */
2962 if (CONST_INT_P (trueop1)
2963 && HWI_COMPUTABLE_MODE_P (mode)
2964 && ~UINTVAL (trueop1)
2965 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2966 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2967 {
2968 rtx pmop[2];
2969 int which;
2970
2971 pmop[0] = XEXP (op0, 0);
2972 pmop[1] = XEXP (op0, 1);
2973
2974 if (CONST_INT_P (pmop[1])
2975 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2976 return simplify_gen_binary (AND, mode, pmop[0], op1);
2977
2978 for (which = 0; which < 2; which++)
2979 {
2980 tem = pmop[which];
2981 switch (GET_CODE (tem))
2982 {
2983 case AND:
2984 if (CONST_INT_P (XEXP (tem, 1))
2985 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2986 == UINTVAL (trueop1))
2987 pmop[which] = XEXP (tem, 0);
2988 break;
2989 case IOR:
2990 case XOR:
2991 if (CONST_INT_P (XEXP (tem, 1))
2992 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2993 pmop[which] = XEXP (tem, 0);
2994 break;
2995 default:
2996 break;
2997 }
2998 }
2999
3000 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3001 {
3002 tem = simplify_gen_binary (GET_CODE (op0), mode,
3003 pmop[0], pmop[1]);
3004 return simplify_gen_binary (code, mode, tem, op1);
3005 }
3006 }
3007
3008 /* (and X (ior (not X) Y)) -> (and X Y) */
3009 if (GET_CODE (op1) == IOR
3010 && GET_CODE (XEXP (op1, 0)) == NOT
3011 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3012 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3013
3014 /* (and (ior (not X) Y) X) -> (and X Y) */
3015 if (GET_CODE (op0) == IOR
3016 && GET_CODE (XEXP (op0, 0)) == NOT
3017 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3018 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3019
3020 /* (and X (ior Y (not X))) -> (and X Y) */
3021 if (GET_CODE (op1) == IOR
3022 && GET_CODE (XEXP (op1, 1)) == NOT
3023 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3024 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3025
3026 /* (and (ior Y (not X)) X) -> (and X Y) */
3027 if (GET_CODE (op0) == IOR
3028 && GET_CODE (XEXP (op0, 1)) == NOT
3029 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3030 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3031
3032 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3033 if (tem)
3034 return tem;
3035
3036 tem = simplify_associative_operation (code, mode, op0, op1);
3037 if (tem)
3038 return tem;
3039 break;
3040
3041 case UDIV:
3042 /* 0/x is 0 (or x&0 if x has side-effects). */
3043 if (trueop0 == CONST0_RTX (mode))
3044 {
3045 if (side_effects_p (op1))
3046 return simplify_gen_binary (AND, mode, op1, trueop0);
3047 return trueop0;
3048 }
3049 /* x/1 is x. */
3050 if (trueop1 == CONST1_RTX (mode))
3051 {
3052 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3053 if (tem)
3054 return tem;
3055 }
3056 /* Convert divide by power of two into shift. */
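/* For instance, (udiv x (const_int 8)) becomes
   (lshiftrt x (const_int 3)). */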
3057 if (CONST_INT_P (trueop1)
3058 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3059 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3060 break;
3061
3062 case DIV:
3063 /* Handle floating point and integers separately. */
3064 if (SCALAR_FLOAT_MODE_P (mode))
3065 {
3066 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3067 safe for modes with NaNs, since 0.0 / 0.0 will then be
3068 NaN rather than 0.0. Nor is it safe for modes with signed
3069 zeros, since dividing 0 by a negative number gives -0.0 */
3070 if (trueop0 == CONST0_RTX (mode)
3071 && !HONOR_NANS (mode)
3072 && !HONOR_SIGNED_ZEROS (mode)
3073 && ! side_effects_p (op1))
3074 return op0;
3075 /* x/1.0 is x. */
3076 if (trueop1 == CONST1_RTX (mode)
3077 && !HONOR_SNANS (mode))
3078 return op0;
3079
3080 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3081 && trueop1 != CONST0_RTX (mode))
3082 {
3083 REAL_VALUE_TYPE d;
3084 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3085
3086 /* x/-1.0 is -x. */
3087 if (REAL_VALUES_EQUAL (d, dconstm1)
3088 && !HONOR_SNANS (mode))
3089 return simplify_gen_unary (NEG, mode, op0, mode);
3090
3091 /* Change FP division by a constant into multiplication.
3092 Only do this with -freciprocal-math. */
3093 if (flag_reciprocal_math
3094 && !REAL_VALUES_EQUAL (d, dconst0))
3095 {
3096 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3097 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3098 return simplify_gen_binary (MULT, mode, op0, tem);
3099 }
3100 }
3101 }
3102 else if (SCALAR_INT_MODE_P (mode))
3103 {
3104 /* 0/x is 0 (or x&0 if x has side-effects). */
3105 if (trueop0 == CONST0_RTX (mode)
3106 && !cfun->can_throw_non_call_exceptions)
3107 {
3108 if (side_effects_p (op1))
3109 return simplify_gen_binary (AND, mode, op1, trueop0);
3110 return trueop0;
3111 }
3112 /* x/1 is x. */
3113 if (trueop1 == CONST1_RTX (mode))
3114 {
3115 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3116 if (tem)
3117 return tem;
3118 }
3119 /* x/-1 is -x. */
3120 if (trueop1 == constm1_rtx)
3121 {
3122 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3123 if (x)
3124 return simplify_gen_unary (NEG, mode, x, mode);
3125 }
3126 }
3127 break;
3128
3129 case UMOD:
3130 /* 0%x is 0 (or x&0 if x has side-effects). */
3131 if (trueop0 == CONST0_RTX (mode))
3132 {
3133 if (side_effects_p (op1))
3134 return simplify_gen_binary (AND, mode, op1, trueop0);
3135 return trueop0;
3136 }
3137 /* x%1 is 0 (of x&0 if x has side-effects). */
3138 if (trueop1 == CONST1_RTX (mode))
3139 {
3140 if (side_effects_p (op0))
3141 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3142 return CONST0_RTX (mode);
3143 }
3144 /* Implement modulus by power of two as AND. */
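/* For instance, (umod x (const_int 16)) becomes
   (and x (const_int 15)). */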
3145 if (CONST_INT_P (trueop1)
3146 && exact_log2 (UINTVAL (trueop1)) > 0)
3147 return simplify_gen_binary (AND, mode, op0,
3148 gen_int_mode (INTVAL (op1) - 1, mode));
3149 break;
3150
3151 case MOD:
3152 /* 0%x is 0 (or x&0 if x has side-effects). */
3153 if (trueop0 == CONST0_RTX (mode))
3154 {
3155 if (side_effects_p (op1))
3156 return simplify_gen_binary (AND, mode, op1, trueop0);
3157 return trueop0;
3158 }
3159 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3160 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3161 {
3162 if (side_effects_p (op0))
3163 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3164 return CONST0_RTX (mode);
3165 }
3166 break;
3167
3168 case ROTATERT:
3169 case ROTATE:
3170 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3171 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3172 bitsize - 1, use the other rotate direction with an amount of
3173 1 .. bitsize / 2 - 1 instead. */
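/* For instance, on a target with both rotate patterns, in SImode
   (rotate x (const_int 25)) becomes (rotatert x (const_int 7)) and
   (rotatert x (const_int 16)) becomes (rotate x (const_int 16)). */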
3174 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3175 if (CONST_INT_P (trueop1)
3176 && IN_RANGE (INTVAL (trueop1),
3177 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3178 GET_MODE_PRECISION (mode) - 1))
3179 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3180 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3181 - INTVAL (trueop1)));
3182 #endif
3183 /* FALLTHRU */
3184 case ASHIFTRT:
3185 if (trueop1 == CONST0_RTX (mode))
3186 return op0;
3187 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3188 return op0;
3189 /* Rotating ~0 always results in ~0. */
3190 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3191 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3192 && ! side_effects_p (op1))
3193 return op0;
3194 /* Given:
3195 scalar modes M1, M2
3196 scalar constants c1, c2
3197 size (M2) > size (M1)
3198 c1 == size (M2) - size (M1)
3199 optimize:
3200 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3201 <low_part>)
3202 (const_int <c2>))
3203 to:
3204 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3205 <low_part>). */
3206 if (code == ASHIFTRT
3207 && !VECTOR_MODE_P (mode)
3208 && SUBREG_P (op0)
3209 && CONST_INT_P (op1)
3210 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3211 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3212 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3213 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3214 > GET_MODE_BITSIZE (mode))
3215 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3216 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3217 - GET_MODE_BITSIZE (mode)))
3218 && subreg_lowpart_p (op0))
3219 {
3220 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3221 + INTVAL (op1));
3222 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3223 tmp = simplify_gen_binary (ASHIFTRT,
3224 GET_MODE (SUBREG_REG (op0)),
3225 XEXP (SUBREG_REG (op0), 0),
3226 tmp);
3227 return simplify_gen_subreg (mode, tmp, inner_mode,
3228 subreg_lowpart_offset (mode,
3229 inner_mode));
3230 }
3231 canonicalize_shift:
3232 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3233 {
3234 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3235 if (val != INTVAL (op1))
3236 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3237 }
3238 break;
3239
3240 case ASHIFT:
3241 case SS_ASHIFT:
3242 case US_ASHIFT:
3243 if (trueop1 == CONST0_RTX (mode))
3244 return op0;
3245 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3246 return op0;
3247 goto canonicalize_shift;
3248
3249 case LSHIFTRT:
3250 if (trueop1 == CONST0_RTX (mode))
3251 return op0;
3252 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3253 return op0;
3254 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3255 if (GET_CODE (op0) == CLZ
3256 && CONST_INT_P (trueop1)
3257 && STORE_FLAG_VALUE == 1
3258 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3259 {
3260 machine_mode imode = GET_MODE (XEXP (op0, 0));
3261 unsigned HOST_WIDE_INT zero_val = 0;
3262
3263 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3264 && zero_val == GET_MODE_PRECISION (imode)
3265 && INTVAL (trueop1) == exact_log2 (zero_val))
3266 return simplify_gen_relational (EQ, mode, imode,
3267 XEXP (op0, 0), const0_rtx);
3268 }
3269 goto canonicalize_shift;
3270
3271 case SMIN:
3272 if (width <= HOST_BITS_PER_WIDE_INT
3273 && mode_signbit_p (mode, trueop1)
3274 && ! side_effects_p (op0))
3275 return op1;
3276 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3277 return op0;
3278 tem = simplify_associative_operation (code, mode, op0, op1);
3279 if (tem)
3280 return tem;
3281 break;
3282
3283 case SMAX:
3284 if (width <= HOST_BITS_PER_WIDE_INT
3285 && CONST_INT_P (trueop1)
3286 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3287 && ! side_effects_p (op0))
3288 return op1;
3289 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3290 return op0;
3291 tem = simplify_associative_operation (code, mode, op0, op1);
3292 if (tem)
3293 return tem;
3294 break;
3295
3296 case UMIN:
3297 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3298 return op1;
3299 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3300 return op0;
3301 tem = simplify_associative_operation (code, mode, op0, op1);
3302 if (tem)
3303 return tem;
3304 break;
3305
3306 case UMAX:
3307 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3308 return op1;
3309 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3310 return op0;
3311 tem = simplify_associative_operation (code, mode, op0, op1);
3312 if (tem)
3313 return tem;
3314 break;
3315
3316 case SS_PLUS:
3317 case US_PLUS:
3318 case SS_MINUS:
3319 case US_MINUS:
3320 case SS_MULT:
3321 case US_MULT:
3322 case SS_DIV:
3323 case US_DIV:
3324 /* ??? There are simplifications that can be done. */
3325 return 0;
3326
3327 case VEC_SELECT:
3328 if (!VECTOR_MODE_P (mode))
3329 {
3330 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3331 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3332 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3333 gcc_assert (XVECLEN (trueop1, 0) == 1);
3334 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3335
3336 if (GET_CODE (trueop0) == CONST_VECTOR)
3337 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3338 (trueop1, 0, 0)));
3339
3340 /* Extract a scalar element from a nested VEC_SELECT expression
3341 (with optional nested VEC_CONCAT expression). Some targets
3342 (i386) extract a scalar element from a vector using a chain of
3343 nested VEC_SELECT expressions. When the input operand is a
3344 memory operand, this operation can be simplified to a simple
3345 scalar load from an offset memory address. */
3346 if (GET_CODE (trueop0) == VEC_SELECT)
3347 {
3348 rtx op0 = XEXP (trueop0, 0);
3349 rtx op1 = XEXP (trueop0, 1);
3350
3351 machine_mode opmode = GET_MODE (op0);
3352 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3353 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3354
3355 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3356 int elem;
3357
3358 rtvec vec;
3359 rtx tmp_op, tmp;
3360
3361 gcc_assert (GET_CODE (op1) == PARALLEL);
3362 gcc_assert (i < n_elts);
3363
3364 /* Select the element pointed to by the nested selector. */
3365 elem = INTVAL (XVECEXP (op1, 0, i));
3366
3367 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3368 if (GET_CODE (op0) == VEC_CONCAT)
3369 {
3370 rtx op00 = XEXP (op0, 0);
3371 rtx op01 = XEXP (op0, 1);
3372
3373 machine_mode mode00, mode01;
3374 int n_elts00, n_elts01;
3375
3376 mode00 = GET_MODE (op00);
3377 mode01 = GET_MODE (op01);
3378
3379 /* Find out number of elements of each operand. */
3380 if (VECTOR_MODE_P (mode00))
3381 {
3382 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3383 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3384 }
3385 else
3386 n_elts00 = 1;
3387
3388 if (VECTOR_MODE_P (mode01))
3389 {
3390 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3391 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3392 }
3393 else
3394 n_elts01 = 1;
3395
3396 gcc_assert (n_elts == n_elts00 + n_elts01);
3397
3398 /* Select correct operand of VEC_CONCAT
3399 and adjust selector. */
3400 if (elem < n_elts01)
3401 tmp_op = op00;
3402 else
3403 {
3404 tmp_op = op01;
3405 elem -= n_elts00;
3406 }
3407 }
3408 else
3409 tmp_op = op0;
3410
3411 vec = rtvec_alloc (1);
3412 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3413
3414 tmp = gen_rtx_fmt_ee (code, mode,
3415 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3416 return tmp;
3417 }
3418 if (GET_CODE (trueop0) == VEC_DUPLICATE
3419 && GET_MODE (XEXP (trueop0, 0)) == mode)
3420 return XEXP (trueop0, 0);
3421 }
3422 else
3423 {
3424 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3425 gcc_assert (GET_MODE_INNER (mode)
3426 == GET_MODE_INNER (GET_MODE (trueop0)));
3427 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3428
3429 if (GET_CODE (trueop0) == CONST_VECTOR)
3430 {
3431 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3432 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3433 rtvec v = rtvec_alloc (n_elts);
3434 unsigned int i;
3435
3436 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3437 for (i = 0; i < n_elts; i++)
3438 {
3439 rtx x = XVECEXP (trueop1, 0, i);
3440
3441 gcc_assert (CONST_INT_P (x));
3442 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3443 INTVAL (x));
3444 }
3445
3446 return gen_rtx_CONST_VECTOR (mode, v);
3447 }
3448
3449 /* Recognize the identity. */
3450 if (GET_MODE (trueop0) == mode)
3451 {
3452 bool maybe_ident = true;
3453 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3454 {
3455 rtx j = XVECEXP (trueop1, 0, i);
3456 if (!CONST_INT_P (j) || INTVAL (j) != i)
3457 {
3458 maybe_ident = false;
3459 break;
3460 }
3461 }
3462 if (maybe_ident)
3463 return trueop0;
3464 }
3465
3466 /* If we build {a,b} then permute it, build the result directly. */
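/* For instance, (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
   (vec_concat:V2DF c d)) (parallel [3 0])) becomes
   (vec_concat:V2DF d a). */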
3467 if (XVECLEN (trueop1, 0) == 2
3468 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3469 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3470 && GET_CODE (trueop0) == VEC_CONCAT
3471 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3472 && GET_MODE (XEXP (trueop0, 0)) == mode
3473 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3474 && GET_MODE (XEXP (trueop0, 1)) == mode)
3475 {
3476 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3477 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3478 rtx subop0, subop1;
3479
3480 gcc_assert (i0 < 4 && i1 < 4);
3481 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3482 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3483
3484 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3485 }
3486
3487 if (XVECLEN (trueop1, 0) == 2
3488 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3489 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3490 && GET_CODE (trueop0) == VEC_CONCAT
3491 && GET_MODE (trueop0) == mode)
3492 {
3493 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3494 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3495 rtx subop0, subop1;
3496
3497 gcc_assert (i0 < 2 && i1 < 2);
3498 subop0 = XEXP (trueop0, i0);
3499 subop1 = XEXP (trueop0, i1);
3500
3501 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3502 }
3503
3504 /* If we select one half of a vec_concat, return that. */
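/* E.g. (vec_select:V2SI (vec_concat:V4SI X Y) (parallel [2 3]))
   is just Y, assuming X and Y are both V2SI values.  */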
3505 if (GET_CODE (trueop0) == VEC_CONCAT
3506 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3507 {
3508 rtx subop0 = XEXP (trueop0, 0);
3509 rtx subop1 = XEXP (trueop0, 1);
3510 machine_mode mode0 = GET_MODE (subop0);
3511 machine_mode mode1 = GET_MODE (subop1);
3512 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3513 int l0 = GET_MODE_SIZE (mode0) / li;
3514 int l1 = GET_MODE_SIZE (mode1) / li;
3515 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3516 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3517 {
3518 bool success = true;
3519 for (int i = 1; i < l0; ++i)
3520 {
3521 rtx j = XVECEXP (trueop1, 0, i);
3522 if (!CONST_INT_P (j) || INTVAL (j) != i)
3523 {
3524 success = false;
3525 break;
3526 }
3527 }
3528 if (success)
3529 return subop0;
3530 }
3531 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3532 {
3533 bool success = true;
3534 for (int i = 1; i < l1; ++i)
3535 {
3536 rtx j = XVECEXP (trueop1, 0, i);
3537 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3538 {
3539 success = false;
3540 break;
3541 }
3542 }
3543 if (success)
3544 return subop1;
3545 }
3546 }
3547 }
3548
3549 if (XVECLEN (trueop1, 0) == 1
3550 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3551 && GET_CODE (trueop0) == VEC_CONCAT)
3552 {
3553 rtx vec = trueop0;
3554 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3555
3556 /* Try to find the element in the VEC_CONCAT. */
3557 while (GET_MODE (vec) != mode
3558 && GET_CODE (vec) == VEC_CONCAT)
3559 {
3560 HOST_WIDE_INT vec_size;
3561
3562 if (CONST_INT_P (XEXP (vec, 0)))
3563 {
3564 /* vec_concat of two const_ints doesn't make sense with
3565 respect to modes. */
3566 if (CONST_INT_P (XEXP (vec, 1)))
3567 return 0;
3568
3569 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3570 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3571 }
3572 else
3573 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3574
3575 if (offset < vec_size)
3576 vec = XEXP (vec, 0);
3577 else
3578 {
3579 offset -= vec_size;
3580 vec = XEXP (vec, 1);
3581 }
3582 vec = avoid_constant_pool_reference (vec);
3583 }
3584
3585 if (GET_MODE (vec) == mode)
3586 return vec;
3587 }
3588
3589 /* If we select elements in a vec_merge that all come from the same
3590 operand, select from that operand directly. */
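/* E.g. (vec_select (vec_merge A B (const_int 3)) (parallel [0 1]))
   only ever reads lanes of A (selector bits 0 and 1 are set), so it
   can become (vec_select A (parallel [0 1])); an assumed two-lane
   illustration.  */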
3591 if (GET_CODE (op0) == VEC_MERGE)
3592 {
3593 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3594 if (CONST_INT_P (trueop02))
3595 {
3596 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3597 bool all_operand0 = true;
3598 bool all_operand1 = true;
3599 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3600 {
3601 rtx j = XVECEXP (trueop1, 0, i);
3602 if (sel & (1 << UINTVAL (j)))
3603 all_operand1 = false;
3604 else
3605 all_operand0 = false;
3606 }
3607 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3608 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3609 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3610 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3611 }
3612 }
3613
3614 /* If we have two nested selects that are inverses of each
3615 other, replace them with the source operand. */
3616 if (GET_CODE (trueop0) == VEC_SELECT
3617 && GET_MODE (XEXP (trueop0, 0)) == mode)
3618 {
3619 rtx op0_subop1 = XEXP (trueop0, 1);
3620 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3621 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3622
3623 /* Apply the outer ordering vector to the inner one. (The inner
3624 ordering vector is expressly permitted to be of a different
3625 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3626 then the two VEC_SELECTs cancel. */
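/* E.g. selecting with (parallel [1 0 3 2]) from a VEC_SELECT that
   itself used (parallel [1 0 3 2]) composes to the identity
   { 0, 1, 2, 3 }, so the whole expression collapses to the original
   V4SI operand (an assumed self-inverse permutation).  */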
3627 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3628 {
3629 rtx x = XVECEXP (trueop1, 0, i);
3630 if (!CONST_INT_P (x))
3631 return 0;
3632 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3633 if (!CONST_INT_P (y) || i != INTVAL (y))
3634 return 0;
3635 }
3636 return XEXP (trueop0, 0);
3637 }
3638
3639 return 0;
3640 case VEC_CONCAT:
3641 {
3642 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3643 ? GET_MODE (trueop0)
3644 : GET_MODE_INNER (mode));
3645 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3646 ? GET_MODE (trueop1)
3647 : GET_MODE_INNER (mode));
3648
3649 gcc_assert (VECTOR_MODE_P (mode));
3650 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3651 == GET_MODE_SIZE (mode));
3652
3653 if (VECTOR_MODE_P (op0_mode))
3654 gcc_assert (GET_MODE_INNER (mode)
3655 == GET_MODE_INNER (op0_mode));
3656 else
3657 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3658
3659 if (VECTOR_MODE_P (op1_mode))
3660 gcc_assert (GET_MODE_INNER (mode)
3661 == GET_MODE_INNER (op1_mode));
3662 else
3663 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3664
3665 if ((GET_CODE (trueop0) == CONST_VECTOR
3666 || CONST_SCALAR_INT_P (trueop0)
3667 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3668 && (GET_CODE (trueop1) == CONST_VECTOR
3669 || CONST_SCALAR_INT_P (trueop1)
3670 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3671 {
3672 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3673 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3674 rtvec v = rtvec_alloc (n_elts);
3675 unsigned int i;
3676 unsigned in_n_elts = 1;
3677
3678 if (VECTOR_MODE_P (op0_mode))
3679 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3680 for (i = 0; i < n_elts; i++)
3681 {
3682 if (i < in_n_elts)
3683 {
3684 if (!VECTOR_MODE_P (op0_mode))
3685 RTVEC_ELT (v, i) = trueop0;
3686 else
3687 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3688 }
3689 else
3690 {
3691 if (!VECTOR_MODE_P (op1_mode))
3692 RTVEC_ELT (v, i) = trueop1;
3693 else
3694 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3695 i - in_n_elts);
3696 }
3697 }
3698
3699 return gen_rtx_CONST_VECTOR (mode, v);
3700 }
3701
3702 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3703 Restrict the transformation to avoid generating a VEC_SELECT with a
3704 mode unrelated to its operand. */
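/* E.g. (vec_concat:V4SI (vec_select:V2SI X (parallel [0 2]))
                         (vec_select:V2SI X (parallel [1 3])))
   becomes (vec_select:V4SI X (parallel [0 2 1 3])), assuming X is
   itself a V4SI value.  */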
3705 if (GET_CODE (trueop0) == VEC_SELECT
3706 && GET_CODE (trueop1) == VEC_SELECT
3707 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3708 && GET_MODE (XEXP (trueop0, 0)) == mode)
3709 {
3710 rtx par0 = XEXP (trueop0, 1);
3711 rtx par1 = XEXP (trueop1, 1);
3712 int len0 = XVECLEN (par0, 0);
3713 int len1 = XVECLEN (par1, 0);
3714 rtvec vec = rtvec_alloc (len0 + len1);
3715 for (int i = 0; i < len0; i++)
3716 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3717 for (int i = 0; i < len1; i++)
3718 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3719 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3720 gen_rtx_PARALLEL (VOIDmode, vec));
3721 }
3722 }
3723 return 0;
3724
3725 default:
3726 gcc_unreachable ();
3727 }
3728
3729 return 0;
3730 }
3731
3732 rtx
3733 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3734 rtx op0, rtx op1)
3735 {
3736 unsigned int width = GET_MODE_PRECISION (mode);
3737
3738 if (VECTOR_MODE_P (mode)
3739 && code != VEC_CONCAT
3740 && GET_CODE (op0) == CONST_VECTOR
3741 && GET_CODE (op1) == CONST_VECTOR)
3742 {
3743 unsigned n_elts = GET_MODE_NUNITS (mode);
3744 machine_mode op0mode = GET_MODE (op0);
3745 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3746 machine_mode op1mode = GET_MODE (op1);
3747 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3748 rtvec v = rtvec_alloc (n_elts);
3749 unsigned int i;
3750
3751 gcc_assert (op0_n_elts == n_elts);
3752 gcc_assert (op1_n_elts == n_elts);
3753 for (i = 0; i < n_elts; i++)
3754 {
3755 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3756 CONST_VECTOR_ELT (op0, i),
3757 CONST_VECTOR_ELT (op1, i));
3758 if (!x)
3759 return 0;
3760 RTVEC_ELT (v, i) = x;
3761 }
3762
3763 return gen_rtx_CONST_VECTOR (mode, v);
3764 }
3765
3766 if (VECTOR_MODE_P (mode)
3767 && code == VEC_CONCAT
3768 && (CONST_SCALAR_INT_P (op0)
3769 || GET_CODE (op0) == CONST_FIXED
3770 || CONST_DOUBLE_AS_FLOAT_P (op0))
3771 && (CONST_SCALAR_INT_P (op1)
3772 || CONST_DOUBLE_AS_FLOAT_P (op1)
3773 || GET_CODE (op1) == CONST_FIXED))
3774 {
3775 unsigned n_elts = GET_MODE_NUNITS (mode);
3776 rtvec v = rtvec_alloc (n_elts);
3777
3778 gcc_assert (n_elts >= 2);
3779 if (n_elts == 2)
3780 {
3781 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3782 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3783
3784 RTVEC_ELT (v, 0) = op0;
3785 RTVEC_ELT (v, 1) = op1;
3786 }
3787 else
3788 {
3789 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3790 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3791 unsigned i;
3792
3793 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3794 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3795 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3796
3797 for (i = 0; i < op0_n_elts; ++i)
3798 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3799 for (i = 0; i < op1_n_elts; ++i)
3800 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3801 }
3802
3803 return gen_rtx_CONST_VECTOR (mode, v);
3804 }
3805
3806 if (SCALAR_FLOAT_MODE_P (mode)
3807 && CONST_DOUBLE_AS_FLOAT_P (op0)
3808 && CONST_DOUBLE_AS_FLOAT_P (op1)
3809 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3810 {
3811 if (code == AND
3812 || code == IOR
3813 || code == XOR)
3814 {
3815 long tmp0[4];
3816 long tmp1[4];
3817 REAL_VALUE_TYPE r;
3818 int i;
3819
3820 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3821 GET_MODE (op0));
3822 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3823 GET_MODE (op1));
3824 for (i = 0; i < 4; i++)
3825 {
3826 switch (code)
3827 {
3828 case AND:
3829 tmp0[i] &= tmp1[i];
3830 break;
3831 case IOR:
3832 tmp0[i] |= tmp1[i];
3833 break;
3834 case XOR:
3835 tmp0[i] ^= tmp1[i];
3836 break;
3837 default:
3838 gcc_unreachable ();
3839 }
3840 }
3841 real_from_target (&r, tmp0, mode);
3842 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3843 }
3844 else
3845 {
3846 REAL_VALUE_TYPE f0, f1, value, result;
3847 bool inexact;
3848
3849 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3850 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3851 real_convert (&f0, mode, &f0);
3852 real_convert (&f1, mode, &f1);
3853
3854 if (HONOR_SNANS (mode)
3855 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3856 return 0;
3857
3858 if (code == DIV
3859 && REAL_VALUES_EQUAL (f1, dconst0)
3860 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3861 return 0;
3862
3863 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3864 && flag_trapping_math
3865 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3866 {
3867 int s0 = REAL_VALUE_NEGATIVE (f0);
3868 int s1 = REAL_VALUE_NEGATIVE (f1);
3869
3870 switch (code)
3871 {
3872 case PLUS:
3873 /* Inf + -Inf = NaN plus exception. */
3874 if (s0 != s1)
3875 return 0;
3876 break;
3877 case MINUS:
3878 /* Inf - Inf = NaN plus exception. */
3879 if (s0 == s1)
3880 return 0;
3881 break;
3882 case DIV:
3883 /* Inf / Inf = NaN plus exception. */
3884 return 0;
3885 default:
3886 break;
3887 }
3888 }
3889
3890 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3891 && flag_trapping_math
3892 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3893 || (REAL_VALUE_ISINF (f1)
3894 && REAL_VALUES_EQUAL (f0, dconst0))))
3895 /* Inf * 0 = NaN plus exception. */
3896 return 0;
3897
3898 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3899 &f0, &f1);
3900 real_convert (&result, mode, &value);
3901
3902 /* Don't constant fold this floating point operation if
3903 the result has overflowed and flag_trapping_math is set. */
3904
3905 if (flag_trapping_math
3906 && MODE_HAS_INFINITIES (mode)
3907 && REAL_VALUE_ISINF (result)
3908 && !REAL_VALUE_ISINF (f0)
3909 && !REAL_VALUE_ISINF (f1))
3910 /* Overflow plus exception. */
3911 return 0;
3912
3913 /* Don't constant fold this floating point operation if the
3914 result may depend upon the run-time rounding mode and
3915 flag_rounding_math is set, or if GCC's software emulation
3916 is unable to accurately represent the result. */
3917
3918 if ((flag_rounding_math
3919 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3920 && (inexact || !real_identical (&result, &value)))
3921 return NULL_RTX;
3922
3923 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3924 }
3925 }
3926
3927 /* We can fold some multi-word operations. */
3928 if ((GET_MODE_CLASS (mode) == MODE_INT
3929 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3930 && CONST_SCALAR_INT_P (op0)
3931 && CONST_SCALAR_INT_P (op1))
3932 {
3933 wide_int result;
3934 bool overflow;
3935 rtx_mode_t pop0 = std::make_pair (op0, mode);
3936 rtx_mode_t pop1 = std::make_pair (op1, mode);
3937
3938 #if TARGET_SUPPORTS_WIDE_INT == 0
3939 /* This assert keeps the simplification from producing a result
3940 that cannot be represented in a CONST_DOUBLE. A lot of
3941 upstream callers expect that this function never fails to
3942 simplify something, so if you added this check to the test
3943 above, the code would die later anyway. If this assert
3944 fires, you just need to make the port support wide int. */
3945 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3946 #endif
3947 switch (code)
3948 {
3949 case MINUS:
3950 result = wi::sub (pop0, pop1);
3951 break;
3952
3953 case PLUS:
3954 result = wi::add (pop0, pop1);
3955 break;
3956
3957 case MULT:
3958 result = wi::mul (pop0, pop1);
3959 break;
3960
3961 case DIV:
3962 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3963 if (overflow)
3964 return NULL_RTX;
3965 break;
3966
3967 case MOD:
3968 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3969 if (overflow)
3970 return NULL_RTX;
3971 break;
3972
3973 case UDIV:
3974 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3975 if (overflow)
3976 return NULL_RTX;
3977 break;
3978
3979 case UMOD:
3980 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3981 if (overflow)
3982 return NULL_RTX;
3983 break;
3984
3985 case AND:
3986 result = wi::bit_and (pop0, pop1);
3987 break;
3988
3989 case IOR:
3990 result = wi::bit_or (pop0, pop1);
3991 break;
3992
3993 case XOR:
3994 result = wi::bit_xor (pop0, pop1);
3995 break;
3996
3997 case SMIN:
3998 result = wi::smin (pop0, pop1);
3999 break;
4000
4001 case SMAX:
4002 result = wi::smax (pop0, pop1);
4003 break;
4004
4005 case UMIN:
4006 result = wi::umin (pop0, pop1);
4007 break;
4008
4009 case UMAX:
4010 result = wi::umax (pop0, pop1);
4011 break;
4012
4013 case LSHIFTRT:
4014 case ASHIFTRT:
4015 case ASHIFT:
4016 {
4017 wide_int wop1 = pop1;
4018 if (SHIFT_COUNT_TRUNCATED)
4019 wop1 = wi::umod_trunc (wop1, width);
4020 else if (wi::geu_p (wop1, width))
4021 return NULL_RTX;
4022
4023 switch (code)
4024 {
4025 case LSHIFTRT:
4026 result = wi::lrshift (pop0, wop1);
4027 break;
4028
4029 case ASHIFTRT:
4030 result = wi::arshift (pop0, wop1);
4031 break;
4032
4033 case ASHIFT:
4034 result = wi::lshift (pop0, wop1);
4035 break;
4036
4037 default:
4038 gcc_unreachable ();
4039 }
4040 break;
4041 }
4042 case ROTATE:
4043 case ROTATERT:
4044 {
4045 if (wi::neg_p (pop1))
4046 return NULL_RTX;
4047
4048 switch (code)
4049 {
4050 case ROTATE:
4051 result = wi::lrotate (pop0, pop1);
4052 break;
4053
4054 case ROTATERT:
4055 result = wi::rrotate (pop0, pop1);
4056 break;
4057
4058 default:
4059 gcc_unreachable ();
4060 }
4061 break;
4062 }
4063 default:
4064 return NULL_RTX;
4065 }
4066 return immed_wide_int_const (result, mode);
4067 }
4068
4069 return NULL_RTX;
4070 }
4071
4072
4073 \f
4074 /* Return a positive integer if X should sort after Y. The value
4075 returned is 1 if and only if X and Y are both regs. */
4076
4077 static int
4078 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4079 {
4080 int result;
4081
4082 result = (commutative_operand_precedence (y)
4083 - commutative_operand_precedence (x));
4084 if (result)
4085 return result + result;
4086
4087 /* Group together equal REGs to do more simplification. */
4088 if (REG_P (x) && REG_P (y))
4089 return REGNO (x) > REGNO (y);
4090
4091 return 0;
4092 }
4093
4094 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4095 operands may be another PLUS or MINUS.
4096
4097 Rather than test for specific cases, we do this by a brute-force method
4098 and do all possible simplifications until no more changes occur. Then
4099 we rebuild the operation.
4100
4101 May return NULL_RTX when no changes were made. */
4102
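/* A sketch of the idea on an assumed input: (minus (plus A B) A) is
   flattened into the entries +A, +B and -A; the pairwise
   simplification loop cancels +A against -A, and the rebuild step
   then returns just B.  */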
4103 static rtx
4104 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4105 rtx op1)
4106 {
4107 struct simplify_plus_minus_op_data
4108 {
4109 rtx op;
4110 short neg;
4111 } ops[16];
4112 rtx result, tem;
4113 int n_ops = 2;
4114 int changed, n_constants, canonicalized = 0;
4115 int i, j;
4116
4117 memset (ops, 0, sizeof ops);
4118
4119 /* Set up the two operands and then expand them until nothing has been
4120 changed. If we run out of room in our array, give up; this should
4121 almost never happen. */
4122
4123 ops[0].op = op0;
4124 ops[0].neg = 0;
4125 ops[1].op = op1;
4126 ops[1].neg = (code == MINUS);
4127
4128 do
4129 {
4130 changed = 0;
4131 n_constants = 0;
4132
4133 for (i = 0; i < n_ops; i++)
4134 {
4135 rtx this_op = ops[i].op;
4136 int this_neg = ops[i].neg;
4137 enum rtx_code this_code = GET_CODE (this_op);
4138
4139 switch (this_code)
4140 {
4141 case PLUS:
4142 case MINUS:
4143 if (n_ops == ARRAY_SIZE (ops))
4144 return NULL_RTX;
4145
4146 ops[n_ops].op = XEXP (this_op, 1);
4147 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4148 n_ops++;
4149
4150 ops[i].op = XEXP (this_op, 0);
4151 changed = 1;
4152 /* If this operand was negated then we will potentially
4153 canonicalize the expression. Similarly if we don't
4154 place the operands adjacent we're re-ordering the
4155 expression and thus might be performing a
4156 canonicalization. Ignore register re-ordering.
4157 ??? It might be better to shuffle the ops array here,
4158 but then (plus (plus (A, B), plus (C, D))) wouldn't
4159 be seen as non-canonical. */
4160 if (this_neg
4161 || (i != n_ops - 2
4162 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4163 canonicalized = 1;
4164 break;
4165
4166 case NEG:
4167 ops[i].op = XEXP (this_op, 0);
4168 ops[i].neg = ! this_neg;
4169 changed = 1;
4170 canonicalized = 1;
4171 break;
4172
4173 case CONST:
4174 if (n_ops != ARRAY_SIZE (ops)
4175 && GET_CODE (XEXP (this_op, 0)) == PLUS
4176 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4177 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4178 {
4179 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4180 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4181 ops[n_ops].neg = this_neg;
4182 n_ops++;
4183 changed = 1;
4184 canonicalized = 1;
4185 }
4186 break;
4187
4188 case NOT:
4189 /* ~a -> (-a - 1) */
4190 if (n_ops != ARRAY_SIZE (ops))
4191 {
4192 ops[n_ops].op = CONSTM1_RTX (mode);
4193 ops[n_ops++].neg = this_neg;
4194 ops[i].op = XEXP (this_op, 0);
4195 ops[i].neg = !this_neg;
4196 changed = 1;
4197 canonicalized = 1;
4198 }
4199 break;
4200
4201 case CONST_INT:
4202 n_constants++;
4203 if (this_neg)
4204 {
4205 ops[i].op = neg_const_int (mode, this_op);
4206 ops[i].neg = 0;
4207 changed = 1;
4208 canonicalized = 1;
4209 }
4210 break;
4211
4212 default:
4213 break;
4214 }
4215 }
4216 }
4217 while (changed);
4218
4219 if (n_constants > 1)
4220 canonicalized = 1;
4221
4222 gcc_assert (n_ops >= 2);
4223
4224 /* If we only have two operands, we can avoid the loops. */
4225 if (n_ops == 2)
4226 {
4227 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4228 rtx lhs, rhs;
4229
4230 /* Get the two operands. Be careful with the order, especially for
4231 the cases where code == MINUS. */
4232 if (ops[0].neg && ops[1].neg)
4233 {
4234 lhs = gen_rtx_NEG (mode, ops[0].op);
4235 rhs = ops[1].op;
4236 }
4237 else if (ops[0].neg)
4238 {
4239 lhs = ops[1].op;
4240 rhs = ops[0].op;
4241 }
4242 else
4243 {
4244 lhs = ops[0].op;
4245 rhs = ops[1].op;
4246 }
4247
4248 return simplify_const_binary_operation (code, mode, lhs, rhs);
4249 }
4250
4251 /* Now simplify each pair of operands until nothing changes. */
4252 while (1)
4253 {
4254 /* Insertion sort is good enough for a small array. */
4255 for (i = 1; i < n_ops; i++)
4256 {
4257 struct simplify_plus_minus_op_data save;
4258 int cmp;
4259
4260 j = i - 1;
4261 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4262 if (cmp <= 0)
4263 continue;
4264 /* Just swapping registers doesn't count as canonicalization. */
4265 if (cmp != 1)
4266 canonicalized = 1;
4267
4268 save = ops[i];
4269 do
4270 ops[j + 1] = ops[j];
4271 while (j--
4272 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4273 ops[j + 1] = save;
4274 }
4275
4276 changed = 0;
4277 for (i = n_ops - 1; i > 0; i--)
4278 for (j = i - 1; j >= 0; j--)
4279 {
4280 rtx lhs = ops[j].op, rhs = ops[i].op;
4281 int lneg = ops[j].neg, rneg = ops[i].neg;
4282
4283 if (lhs != 0 && rhs != 0)
4284 {
4285 enum rtx_code ncode = PLUS;
4286
4287 if (lneg != rneg)
4288 {
4289 ncode = MINUS;
4290 if (lneg)
4291 std::swap (lhs, rhs);
4292 }
4293 else if (swap_commutative_operands_p (lhs, rhs))
4294 std::swap (lhs, rhs);
4295
4296 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4297 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4298 {
4299 rtx tem_lhs, tem_rhs;
4300
4301 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4302 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4303 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4304 tem_rhs);
4305
4306 if (tem && !CONSTANT_P (tem))
4307 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4308 }
4309 else
4310 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4311
4312 if (tem)
4313 {
4314 /* Reject "simplifications" that just wrap the two
4315 arguments in a CONST. Failure to do so can result
4316 in infinite recursion with simplify_binary_operation
4317 when it calls us to simplify CONST operations.
4318 Also, if we find such a simplification, don't try
4319 any more combinations with this rhs: We must have
4320 something like symbol+offset, i.e. one of the
4321 trivial CONST expressions we handle later. */
4322 if (GET_CODE (tem) == CONST
4323 && GET_CODE (XEXP (tem, 0)) == ncode
4324 && XEXP (XEXP (tem, 0), 0) == lhs
4325 && XEXP (XEXP (tem, 0), 1) == rhs)
4326 break;
4327 lneg &= rneg;
4328 if (GET_CODE (tem) == NEG)
4329 tem = XEXP (tem, 0), lneg = !lneg;
4330 if (CONST_INT_P (tem) && lneg)
4331 tem = neg_const_int (mode, tem), lneg = 0;
4332
4333 ops[i].op = tem;
4334 ops[i].neg = lneg;
4335 ops[j].op = NULL_RTX;
4336 changed = 1;
4337 canonicalized = 1;
4338 }
4339 }
4340 }
4341
4342 if (!changed)
4343 break;
4344
4345 /* Pack all the operands to the lower-numbered entries. */
4346 for (i = 0, j = 0; j < n_ops; j++)
4347 if (ops[j].op)
4348 {
4349 ops[i] = ops[j];
4350 i++;
4351 }
4352 n_ops = i;
4353 }
4354
4355 /* If nothing changed, fail. */
4356 if (!canonicalized)
4357 return NULL_RTX;
4358
4359 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4360 if (n_ops == 2
4361 && CONST_INT_P (ops[1].op)
4362 && CONSTANT_P (ops[0].op)
4363 && ops[0].neg)
4364 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4365
4366 /* We suppressed creation of trivial CONST expressions in the
4367 combination loop to avoid recursion. Create one manually now.
4368 The combination loop should have ensured that there is exactly
4369 one CONST_INT, and the sort will have ensured that it is last
4370 in the array and that any other constant will be next-to-last. */
4371
4372 if (n_ops > 1
4373 && CONST_INT_P (ops[n_ops - 1].op)
4374 && CONSTANT_P (ops[n_ops - 2].op))
4375 {
4376 rtx value = ops[n_ops - 1].op;
4377 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4378 value = neg_const_int (mode, value);
4379 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4380 INTVAL (value));
4381 n_ops--;
4382 }
4383
4384 /* Put a non-negated operand first, if possible. */
4385
4386 for (i = 0; i < n_ops && ops[i].neg; i++)
4387 continue;
4388 if (i == n_ops)
4389 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4390 else if (i != 0)
4391 {
4392 tem = ops[0].op;
4393 ops[0] = ops[i];
4394 ops[i].op = tem;
4395 ops[i].neg = 1;
4396 }
4397
4398 /* Now make the result by performing the requested operations. */
4399 result = ops[0].op;
4400 for (i = 1; i < n_ops; i++)
4401 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4402 mode, result, ops[i].op);
4403
4404 return result;
4405 }
4406
4407 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4408 static bool
4409 plus_minus_operand_p (const_rtx x)
4410 {
4411 return GET_CODE (x) == PLUS
4412 || GET_CODE (x) == MINUS
4413 || (GET_CODE (x) == CONST
4414 && GET_CODE (XEXP (x, 0)) == PLUS
4415 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4416 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4417 }
4418
4419 /* Like simplify_binary_operation except used for relational operators.
4420 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4421 not also both be VOIDmode.
4422
4423 CMP_MODE specifies in which mode the comparison is done, so it is
4424 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4425 the operands or, if both are VOIDmode, the operands are compared in
4426 "infinite precision". */
4427 rtx
4428 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4429 machine_mode cmp_mode, rtx op0, rtx op1)
4430 {
4431 rtx tem, trueop0, trueop1;
4432
4433 if (cmp_mode == VOIDmode)
4434 cmp_mode = GET_MODE (op0);
4435 if (cmp_mode == VOIDmode)
4436 cmp_mode = GET_MODE (op1);
4437
4438 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4439 if (tem)
4440 {
4441 if (SCALAR_FLOAT_MODE_P (mode))
4442 {
4443 if (tem == const0_rtx)
4444 return CONST0_RTX (mode);
4445 #ifdef FLOAT_STORE_FLAG_VALUE
4446 {
4447 REAL_VALUE_TYPE val;
4448 val = FLOAT_STORE_FLAG_VALUE (mode);
4449 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4450 }
4451 #else
4452 return NULL_RTX;
4453 #endif
4454 }
4455 if (VECTOR_MODE_P (mode))
4456 {
4457 if (tem == const0_rtx)
4458 return CONST0_RTX (mode);
4459 #ifdef VECTOR_STORE_FLAG_VALUE
4460 {
4461 int i, units;
4462 rtvec v;
4463
4464 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4465 if (val == NULL_RTX)
4466 return NULL_RTX;
4467 if (val == const1_rtx)
4468 return CONST1_RTX (mode);
4469
4470 units = GET_MODE_NUNITS (mode);
4471 v = rtvec_alloc (units);
4472 for (i = 0; i < units; i++)
4473 RTVEC_ELT (v, i) = val;
4474 return gen_rtx_raw_CONST_VECTOR (mode, v);
4475 }
4476 #else
4477 return NULL_RTX;
4478 #endif
4479 }
4480
4481 return tem;
4482 }
4483
4484 /* For the following tests, ensure const0_rtx is op1. */
4485 if (swap_commutative_operands_p (op0, op1)
4486 || (op0 == const0_rtx && op1 != const0_rtx))
4487 std::swap (op0, op1), code = swap_condition (code);
4488
4489 /* If op0 is a compare, extract the comparison arguments from it. */
4490 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4491 return simplify_gen_relational (code, mode, VOIDmode,
4492 XEXP (op0, 0), XEXP (op0, 1));
4493
4494 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4495 || CC0_P (op0))
4496 return NULL_RTX;
4497
4498 trueop0 = avoid_constant_pool_reference (op0);
4499 trueop1 = avoid_constant_pool_reference (op1);
4500 return simplify_relational_operation_1 (code, mode, cmp_mode,
4501 trueop0, trueop1);
4502 }
4503
4504 /* This part of simplify_relational_operation is only used when CMP_MODE
4505 is not in class MODE_CC (i.e. it is a real comparison).
4506
4507 MODE is the mode of the result, while CMP_MODE specifies in which
4508 mode the comparison is done, so it is the mode of the operands. */
4509
4510 static rtx
4511 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4512 machine_mode cmp_mode, rtx op0, rtx op1)
4513 {
4514 enum rtx_code op0code = GET_CODE (op0);
4515
4516 if (op1 == const0_rtx && COMPARISON_P (op0))
4517 {
4518 /* If op0 is a comparison, extract the comparison arguments
4519 from it. */
4520 if (code == NE)
4521 {
4522 if (GET_MODE (op0) == mode)
4523 return simplify_rtx (op0);
4524 else
4525 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4526 XEXP (op0, 0), XEXP (op0, 1));
4527 }
4528 else if (code == EQ)
4529 {
4530 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4531 if (new_code != UNKNOWN)
4532 return simplify_gen_relational (new_code, mode, VOIDmode,
4533 XEXP (op0, 0), XEXP (op0, 1));
4534 }
4535 }
4536
4537 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4538 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
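/* For instance (an assumed concrete case), (ltu (plus x 1) (const_int 1))
   is true exactly when the addition wraps around, which is the same
   condition as (geu x (const_int -1)).  */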
4539 if ((code == LTU || code == GEU)
4540 && GET_CODE (op0) == PLUS
4541 && CONST_INT_P (XEXP (op0, 1))
4542 && (rtx_equal_p (op1, XEXP (op0, 0))
4543 || rtx_equal_p (op1, XEXP (op0, 1)))
4544 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4545 && XEXP (op0, 1) != const0_rtx)
4546 {
4547 rtx new_cmp
4548 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4549 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4550 cmp_mode, XEXP (op0, 0), new_cmp);
4551 }
4552
4553 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4554 if ((code == LTU || code == GEU)
4555 && GET_CODE (op0) == PLUS
4556 && rtx_equal_p (op1, XEXP (op0, 1))
4557 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4558 && !rtx_equal_p (op1, XEXP (op0, 0)))
4559 return simplify_gen_relational (code, mode, cmp_mode, op0,
4560 copy_rtx (XEXP (op0, 0)));
4561
4562 if (op1 == const0_rtx)
4563 {
4564 /* Canonicalize (GTU x 0) as (NE x 0). */
4565 if (code == GTU)
4566 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4567 /* Canonicalize (LEU x 0) as (EQ x 0). */
4568 if (code == LEU)
4569 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4570 }
4571 else if (op1 == const1_rtx)
4572 {
4573 switch (code)
4574 {
4575 case GE:
4576 /* Canonicalize (GE x 1) as (GT x 0). */
4577 return simplify_gen_relational (GT, mode, cmp_mode,
4578 op0, const0_rtx);
4579 case GEU:
4580 /* Canonicalize (GEU x 1) as (NE x 0). */
4581 return simplify_gen_relational (NE, mode, cmp_mode,
4582 op0, const0_rtx);
4583 case LT:
4584 /* Canonicalize (LT x 1) as (LE x 0). */
4585 return simplify_gen_relational (LE, mode, cmp_mode,
4586 op0, const0_rtx);
4587 case LTU:
4588 /* Canonicalize (LTU x 1) as (EQ x 0). */
4589 return simplify_gen_relational (EQ, mode, cmp_mode,
4590 op0, const0_rtx);
4591 default:
4592 break;
4593 }
4594 }
4595 else if (op1 == constm1_rtx)
4596 {
4597 /* Canonicalize (LE x -1) as (LT x 0). */
4598 if (code == LE)
4599 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4600 /* Canonicalize (GT x -1) as (GE x 0). */
4601 if (code == GT)
4602 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4603 }
4604
4605 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4606 if ((code == EQ || code == NE)
4607 && (op0code == PLUS || op0code == MINUS)
4608 && CONSTANT_P (op1)
4609 && CONSTANT_P (XEXP (op0, 1))
4610 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4611 {
4612 rtx x = XEXP (op0, 0);
4613 rtx c = XEXP (op0, 1);
4614 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4615 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4616
4617 /* Detect an infinitely recursive condition, where we oscillate in this
4618 simplification case between:
4619 A + B == C <---> C - B == A,
4620 where A, B, and C are all constants with non-simplifiable expressions,
4621 usually SYMBOL_REFs. */
4622 if (GET_CODE (tem) == invcode
4623 && CONSTANT_P (x)
4624 && rtx_equal_p (c, XEXP (tem, 1)))
4625 return NULL_RTX;
4626
4627 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4628 }
4629
4630 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4631 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4632 if (code == NE
4633 && op1 == const0_rtx
4634 && GET_MODE_CLASS (mode) == MODE_INT
4635 && cmp_mode != VOIDmode
4636 /* ??? Work-around BImode bugs in the ia64 backend. */
4637 && mode != BImode
4638 && cmp_mode != BImode
4639 && nonzero_bits (op0, cmp_mode) == 1
4640 && STORE_FLAG_VALUE == 1)
4641 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4642 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4643 : lowpart_subreg (mode, op0, cmp_mode);
4644
4645 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4646 if ((code == EQ || code == NE)
4647 && op1 == const0_rtx
4648 && op0code == XOR)
4649 return simplify_gen_relational (code, mode, cmp_mode,
4650 XEXP (op0, 0), XEXP (op0, 1));
4651
4652 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4653 if ((code == EQ || code == NE)
4654 && op0code == XOR
4655 && rtx_equal_p (XEXP (op0, 0), op1)
4656 && !side_effects_p (XEXP (op0, 0)))
4657 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4658 CONST0_RTX (mode));
4659
4660 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4661 if ((code == EQ || code == NE)
4662 && op0code == XOR
4663 && rtx_equal_p (XEXP (op0, 1), op1)
4664 && !side_effects_p (XEXP (op0, 1)))
4665 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4666 CONST0_RTX (mode));
4667
4668 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4669 if ((code == EQ || code == NE)
4670 && op0code == XOR
4671 && CONST_SCALAR_INT_P (op1)
4672 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4673 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4674 simplify_gen_binary (XOR, cmp_mode,
4675 XEXP (op0, 1), op1));
4676
4677 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4678 can be implemented with a BICS instruction on some targets, or
4679 constant-folded if y is a constant. */
4680 if ((code == EQ || code == NE)
4681 && op0code == AND
4682 && rtx_equal_p (XEXP (op0, 0), op1)
4683 && !side_effects_p (op1)
4684 && op1 != CONST0_RTX (cmp_mode))
4685 {
4686 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4687 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4688
4689 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4690 CONST0_RTX (cmp_mode));
4691 }
4692
4693 /* Likewise for (eq/ne (and x y) y). */
4694 if ((code == EQ || code == NE)
4695 && op0code == AND
4696 && rtx_equal_p (XEXP (op0, 1), op1)
4697 && !side_effects_p (op1)
4698 && op1 != CONST0_RTX (cmp_mode))
4699 {
4700 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4701 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4702
4703 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4704 CONST0_RTX (cmp_mode));
4705 }
4706
4707 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4708 if ((code == EQ || code == NE)
4709 && GET_CODE (op0) == BSWAP
4710 && CONST_SCALAR_INT_P (op1))
4711 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4712 simplify_gen_unary (BSWAP, cmp_mode,
4713 op1, cmp_mode));
4714
4715 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4716 if ((code == EQ || code == NE)
4717 && GET_CODE (op0) == BSWAP
4718 && GET_CODE (op1) == BSWAP)
4719 return simplify_gen_relational (code, mode, cmp_mode,
4720 XEXP (op0, 0), XEXP (op1, 0));
4721
4722 if (op0code == POPCOUNT && op1 == const0_rtx)
4723 switch (code)
4724 {
4725 case EQ:
4726 case LE:
4727 case LEU:
4728 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4729 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4730 XEXP (op0, 0), const0_rtx);
4731
4732 case NE:
4733 case GT:
4734 case GTU:
4735 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4736 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4737 XEXP (op0, 0), const0_rtx);
4738
4739 default:
4740 break;
4741 }
4742
4743 return NULL_RTX;
4744 }
4745
4746 enum
4747 {
4748 CMP_EQ = 1,
4749 CMP_LT = 2,
4750 CMP_GT = 4,
4751 CMP_LTU = 8,
4752 CMP_GTU = 16
4753 };
4754
4755
4756 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4757 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4758 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4759 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4760 For floating-point comparisons, assume that the operands were ordered. */
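/* E.g. if the operands are known to compare unsigned-greater (CMP_GTU
   set), a requested LEU folds to const0_rtx and a requested GTU folds
   to const_true_rtx; an assumed illustration of the mapping below.  */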
4761
4762 static rtx
4763 comparison_result (enum rtx_code code, int known_results)
4764 {
4765 switch (code)
4766 {
4767 case EQ:
4768 case UNEQ:
4769 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4770 case NE:
4771 case LTGT:
4772 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4773
4774 case LT:
4775 case UNLT:
4776 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4777 case GE:
4778 case UNGE:
4779 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4780
4781 case GT:
4782 case UNGT:
4783 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4784 case LE:
4785 case UNLE:
4786 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4787
4788 case LTU:
4789 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4790 case GEU:
4791 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4792
4793 case GTU:
4794 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4795 case LEU:
4796 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4797
4798 case ORDERED:
4799 return const_true_rtx;
4800 case UNORDERED:
4801 return const0_rtx;
4802 default:
4803 gcc_unreachable ();
4804 }
4805 }
4806
4807 /* Check if the given comparison (done in the given MODE) is actually
4808 a tautology or a contradiction. If the mode is VOIDmode, the
4809 comparison is done in "infinite precision". If no simplification
4810 is possible, this function returns zero. Otherwise, it returns
4811 either const_true_rtx or const0_rtx. */
4812
4813 rtx
4814 simplify_const_relational_operation (enum rtx_code code,
4815 machine_mode mode,
4816 rtx op0, rtx op1)
4817 {
4818 rtx tem;
4819 rtx trueop0;
4820 rtx trueop1;
4821
4822 gcc_assert (mode != VOIDmode
4823 || (GET_MODE (op0) == VOIDmode
4824 && GET_MODE (op1) == VOIDmode));
4825
4826 /* If op0 is a compare, extract the comparison arguments from it. */
4827 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4828 {
4829 op1 = XEXP (op0, 1);
4830 op0 = XEXP (op0, 0);
4831
4832 if (GET_MODE (op0) != VOIDmode)
4833 mode = GET_MODE (op0);
4834 else if (GET_MODE (op1) != VOIDmode)
4835 mode = GET_MODE (op1);
4836 else
4837 return 0;
4838 }
4839
4840 /* We can't simplify MODE_CC values since we don't know what the
4841 actual comparison is. */
4842 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4843 return 0;
4844
4845 /* Make sure the constant is second. */
4846 if (swap_commutative_operands_p (op0, op1))
4847 {
4848 std::swap (op0, op1);
4849 code = swap_condition (code);
4850 }
4851
4852 trueop0 = avoid_constant_pool_reference (op0);
4853 trueop1 = avoid_constant_pool_reference (op1);
4854
4855 /* For integer comparisons of A and B maybe we can simplify A - B and can
4856 then simplify a comparison of that with zero. If A and B are both either
4857 a register or a CONST_INT, this can't help; testing for these cases will
4858 prevent infinite recursion here and speed things up.
4859
4860 We can only do this for EQ and NE comparisons as otherwise we may
4861 lose or introduce overflow which we cannot disregard as undefined as
4862 we do not know the signedness of the operation on either the left or
4863 the right hand side of the comparison. */
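/* E.g. for (eq (plus x 3) (plus x 4)) the difference folds to
   (const_int -1), and comparing that against zero gives const0_rtx
   (an assumed illustration).  */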
4864
4865 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4866 && (code == EQ || code == NE)
4867 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4868 && (REG_P (op1) || CONST_INT_P (trueop1)))
4869 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4870 /* We cannot do this if tem is a nonzero address. */
4871 && ! nonzero_address_p (tem))
4872 return simplify_const_relational_operation (signed_condition (code),
4873 mode, tem, const0_rtx);
4874
4875 if (! HONOR_NANS (mode) && code == ORDERED)
4876 return const_true_rtx;
4877
4878 if (! HONOR_NANS (mode) && code == UNORDERED)
4879 return const0_rtx;
4880
4881 /* For modes without NaNs, if the two operands are equal, we know the
4882 result except if they have side-effects. Even with NaNs we know
4883 the result of unordered comparisons and, if signaling NaNs are
4884 irrelevant, also the result of LT/GT/LTGT. */
4885 if ((! HONOR_NANS (trueop0)
4886 || code == UNEQ || code == UNLE || code == UNGE
4887 || ((code == LT || code == GT || code == LTGT)
4888 && ! HONOR_SNANS (trueop0)))
4889 && rtx_equal_p (trueop0, trueop1)
4890 && ! side_effects_p (trueop0))
4891 return comparison_result (code, CMP_EQ);
4892
4893 /* If the operands are floating-point constants, see if we can fold
4894 the result. */
4895 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4896 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4897 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4898 {
4899 REAL_VALUE_TYPE d0, d1;
4900
4901 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4902 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4903
4904 /* Comparisons are unordered iff at least one of the values is NaN. */
4905 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4906 switch (code)
4907 {
4908 case UNEQ:
4909 case UNLT:
4910 case UNGT:
4911 case UNLE:
4912 case UNGE:
4913 case NE:
4914 case UNORDERED:
4915 return const_true_rtx;
4916 case EQ:
4917 case LT:
4918 case GT:
4919 case LE:
4920 case GE:
4921 case LTGT:
4922 case ORDERED:
4923 return const0_rtx;
4924 default:
4925 return 0;
4926 }
4927
4928 return comparison_result (code,
4929 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4930 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4931 }
4932
4933 /* Otherwise, see if the operands are both integers. */
4934 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4935 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4936 {
4937 /* It would be nice if we really had a mode here. However, the
4938 largest int representable on the target is as good as
4939 infinite. */
4940 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4941 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4942 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4943
4944 if (wi::eq_p (ptrueop0, ptrueop1))
4945 return comparison_result (code, CMP_EQ);
4946 else
4947 {
4948 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4949 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4950 return comparison_result (code, cr);
4951 }
4952 }
4953
4954 /* Optimize comparisons with upper and lower bounds. */
4955 if (HWI_COMPUTABLE_MODE_P (mode)
4956 && CONST_INT_P (trueop1)
4957 && !side_effects_p (trueop0))
4958 {
4959 int sign;
4960 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4961 HOST_WIDE_INT val = INTVAL (trueop1);
4962 HOST_WIDE_INT mmin, mmax;
4963
4964 if (code == GEU
4965 || code == LEU
4966 || code == GTU
4967 || code == LTU)
4968 sign = 0;
4969 else
4970 sign = 1;
4971
4972 /* Get a reduced range if the sign bit is zero. */
4973 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4974 {
4975 mmin = 0;
4976 mmax = nonzero;
4977 }
4978 else
4979 {
4980 rtx mmin_rtx, mmax_rtx;
4981 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4982
4983 mmin = INTVAL (mmin_rtx);
4984 mmax = INTVAL (mmax_rtx);
4985 if (sign)
4986 {
4987 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4988
4989 mmin >>= (sign_copies - 1);
4990 mmax >>= (sign_copies - 1);
4991 }
4992 }
4993
4994 switch (code)
4995 {
4996 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4997 case GEU:
4998 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4999 return const_true_rtx;
5000 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5001 return const0_rtx;
5002 break;
5003 case GE:
5004 if (val <= mmin)
5005 return const_true_rtx;
5006 if (val > mmax)
5007 return const0_rtx;
5008 break;
5009
5010 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5011 case LEU:
5012 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5013 return const_true_rtx;
5014 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5015 return const0_rtx;
5016 break;
5017 case LE:
5018 if (val >= mmax)
5019 return const_true_rtx;
5020 if (val < mmin)
5021 return const0_rtx;
5022 break;
5023
5024 case EQ:
5025 /* x == y is always false for y out of range. */
5026 if (val < mmin || val > mmax)
5027 return const0_rtx;
5028 break;
5029
5030 /* x > y is always false for y >= mmax, always true for y < mmin. */
5031 case GTU:
5032 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5033 return const0_rtx;
5034 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5035 return const_true_rtx;
5036 break;
5037 case GT:
5038 if (val >= mmax)
5039 return const0_rtx;
5040 if (val < mmin)
5041 return const_true_rtx;
5042 break;
5043
5044 /* x < y is always false for y <= mmin, always true for y > mmax. */
5045 case LTU:
5046 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5047 return const0_rtx;
5048 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5049 return const_true_rtx;
5050 break;
5051 case LT:
5052 if (val <= mmin)
5053 return const0_rtx;
5054 if (val > mmax)
5055 return const_true_rtx;
5056 break;
5057
5058 case NE:
5059 /* x != y is always true for y out of range. */
5060 if (val < mmin || val > mmax)
5061 return const_true_rtx;
5062 break;
5063
5064 default:
5065 break;
5066 }
5067 }
5068
5069 /* Optimize integer comparisons with zero. */
5070 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5071 {
5072 /* Some addresses are known to be nonzero. We don't know
5073 their sign, but equality comparisons are known. */
5074 if (nonzero_address_p (trueop0))
5075 {
5076 if (code == EQ || code == LEU)
5077 return const0_rtx;
5078 if (code == NE || code == GTU)
5079 return const_true_rtx;
5080 }
5081
5082 /* See if the first operand is an IOR with a constant. If so, we
5083 may be able to determine the result of this comparison. */
5084 if (GET_CODE (op0) == IOR)
5085 {
5086 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5087 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5088 {
5089 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5090 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5091 && (UINTVAL (inner_const)
5092 & ((unsigned HOST_WIDE_INT) 1
5093 << sign_bitnum)));
5094
5095 switch (code)
5096 {
5097 case EQ:
5098 case LEU:
5099 return const0_rtx;
5100 case NE:
5101 case GTU:
5102 return const_true_rtx;
5103 case LT:
5104 case LE:
5105 if (has_sign)
5106 return const_true_rtx;
5107 break;
5108 case GT:
5109 case GE:
5110 if (has_sign)
5111 return const0_rtx;
5112 break;
5113 default:
5114 break;
5115 }
5116 }
5117 }
5118 }
5119
5120 /* Optimize comparison of ABS with zero. */
5121 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5122 && (GET_CODE (trueop0) == ABS
5123 || (GET_CODE (trueop0) == FLOAT_EXTEND
5124 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5125 {
5126 switch (code)
5127 {
5128 case LT:
5129 /* Optimize abs(x) < 0.0. */
5130 if (!HONOR_SNANS (mode)
5131 && (!INTEGRAL_MODE_P (mode)
5132 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5133 {
5134 if (INTEGRAL_MODE_P (mode)
5135 && (issue_strict_overflow_warning
5136 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5137 warning (OPT_Wstrict_overflow,
5138 ("assuming signed overflow does not occur when "
5139 "assuming abs (x) < 0 is false"));
5140 return const0_rtx;
5141 }
5142 break;
5143
5144 case GE:
5145 /* Optimize abs(x) >= 0.0. */
5146 if (!HONOR_NANS (mode)
5147 && (!INTEGRAL_MODE_P (mode)
5148 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5149 {
5150 if (INTEGRAL_MODE_P (mode)
5151 && (issue_strict_overflow_warning
5152 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5153 warning (OPT_Wstrict_overflow,
5154 ("assuming signed overflow does not occur when "
5155 "assuming abs (x) >= 0 is true"));
5156 return const_true_rtx;
5157 }
5158 break;
5159
5160 case UNGE:
5161 /* Optimize ! (abs(x) < 0.0). */
5162 return const_true_rtx;
5163
5164 default:
5165 break;
5166 }
5167 }
5168
5169 return 0;
5170 }
5171 \f
5172 /* Simplify CODE, an operation with result mode MODE and three operands,
5173 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5174 a constant. Return 0 if no simplification is possible. */
5175
5176 rtx
5177 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5178 machine_mode op0_mode, rtx op0, rtx op1,
5179 rtx op2)
5180 {
5181 unsigned int width = GET_MODE_PRECISION (mode);
5182 bool any_change = false;
5183 rtx tem, trueop2;
5184
5185 /* VOIDmode means "infinite" precision. */
5186 if (width == 0)
5187 width = HOST_BITS_PER_WIDE_INT;
5188
5189 switch (code)
5190 {
5191 case FMA:
5192 /* Simplify negations around the multiplication. */
5193 /* -a * -b + c => a * b + c. */
5194 if (GET_CODE (op0) == NEG)
5195 {
5196 tem = simplify_unary_operation (NEG, mode, op1, mode);
5197 if (tem)
5198 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5199 }
5200 else if (GET_CODE (op1) == NEG)
5201 {
5202 tem = simplify_unary_operation (NEG, mode, op0, mode);
5203 if (tem)
5204 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5205 }
5206
5207 /* Canonicalize the two multiplication operands. */
5208 /* a * -b + c => -b * a + c. */
5209 if (swap_commutative_operands_p (op0, op1))
5210 std::swap (op0, op1), any_change = true;
5211
5212 if (any_change)
5213 return gen_rtx_FMA (mode, op0, op1, op2);
5214 return NULL_RTX;
5215
5216 case SIGN_EXTRACT:
5217 case ZERO_EXTRACT:
5218 if (CONST_INT_P (op0)
5219 && CONST_INT_P (op1)
5220 && CONST_INT_P (op2)
5221 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5222 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5223 {
5224 /* Extracting a bit-field from a constant */
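/* E.g. (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
   shifts right by 4 and masks to 4 bits, giving (const_int 3)
   (assuming !BITS_BIG_ENDIAN).  */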
5225 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5226 HOST_WIDE_INT op1val = INTVAL (op1);
5227 HOST_WIDE_INT op2val = INTVAL (op2);
5228 if (BITS_BIG_ENDIAN)
5229 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5230 else
5231 val >>= op2val;
5232
5233 if (HOST_BITS_PER_WIDE_INT != op1val)
5234 {
5235 /* First zero-extend. */
5236 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5237 /* If desired, propagate sign bit. */
5238 if (code == SIGN_EXTRACT
5239 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5240 != 0)
5241 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5242 }
5243
5244 return gen_int_mode (val, mode);
5245 }
5246 break;
5247
5248 case IF_THEN_ELSE:
5249 if (CONST_INT_P (op0))
5250 return op0 != const0_rtx ? op1 : op2;
5251
5252 /* Convert c ? a : a into "a". */
5253 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5254 return op1;
5255
5256 /* Convert a != b ? a : b into "a". */
5257 if (GET_CODE (op0) == NE
5258 && ! side_effects_p (op0)
5259 && ! HONOR_NANS (mode)
5260 && ! HONOR_SIGNED_ZEROS (mode)
5261 && ((rtx_equal_p (XEXP (op0, 0), op1)
5262 && rtx_equal_p (XEXP (op0, 1), op2))
5263 || (rtx_equal_p (XEXP (op0, 0), op2)
5264 && rtx_equal_p (XEXP (op0, 1), op1))))
5265 return op1;
5266
5267 /* Convert a == b ? a : b into "b". */
5268 if (GET_CODE (op0) == EQ
5269 && ! side_effects_p (op0)
5270 && ! HONOR_NANS (mode)
5271 && ! HONOR_SIGNED_ZEROS (mode)
5272 && ((rtx_equal_p (XEXP (op0, 0), op1)
5273 && rtx_equal_p (XEXP (op0, 1), op2))
5274 || (rtx_equal_p (XEXP (op0, 0), op2)
5275 && rtx_equal_p (XEXP (op0, 1), op1))))
5276 return op2;
5277
5278 /* Convert (!c) != {0,...,0} ? a : b into
5279 c != {0,...,0} ? b : a for vector modes. */
5280 if (VECTOR_MODE_P (GET_MODE (op1))
5281 && GET_CODE (op0) == NE
5282 && GET_CODE (XEXP (op0, 0)) == NOT
5283 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5284 {
5285 rtx cv = XEXP (op0, 1);
5286 int nunits = CONST_VECTOR_NUNITS (cv);
5287 bool ok = true;
5288 for (int i = 0; i < nunits; ++i)
5289 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5290 {
5291 ok = false;
5292 break;
5293 }
5294 if (ok)
5295 {
5296 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5297 XEXP (XEXP (op0, 0), 0),
5298 XEXP (op0, 1));
5299 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5300 return retval;
5301 }
5302 }
5303
5304 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5305 {
5306 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5307 ? GET_MODE (XEXP (op0, 1))
5308 : GET_MODE (XEXP (op0, 0)));
5309 rtx temp;
5310
5311 /* Look for happy constants in op1 and op2. */
5312 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5313 {
5314 HOST_WIDE_INT t = INTVAL (op1);
5315 HOST_WIDE_INT f = INTVAL (op2);
5316
5317 if (t == STORE_FLAG_VALUE && f == 0)
5318 code = GET_CODE (op0);
5319 else if (t == 0 && f == STORE_FLAG_VALUE)
5320 {
5321 enum rtx_code tmp;
5322 tmp = reversed_comparison_code (op0, NULL_RTX);
5323 if (tmp == UNKNOWN)
5324 break;
5325 code = tmp;
5326 }
5327 else
5328 break;
5329
5330 return simplify_gen_relational (code, mode, cmp_mode,
5331 XEXP (op0, 0), XEXP (op0, 1));
5332 }
5333
5334 if (cmp_mode == VOIDmode)
5335 cmp_mode = op0_mode;
5336 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5337 cmp_mode, XEXP (op0, 0),
5338 XEXP (op0, 1));
5339
5340 /* See if any simplifications were possible. */
5341 if (temp)
5342 {
5343 if (CONST_INT_P (temp))
5344 return temp == const0_rtx ? op2 : op1;
5345 else if (temp)
5346 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5347 }
5348 }
5349 break;
5350
5351 case VEC_MERGE:
5352 gcc_assert (GET_MODE (op0) == mode);
5353 gcc_assert (GET_MODE (op1) == mode);
5354 gcc_assert (VECTOR_MODE_P (mode));
5355 trueop2 = avoid_constant_pool_reference (op2);
5356 if (CONST_INT_P (trueop2))
5357 {
5358 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5359 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5360 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5361 unsigned HOST_WIDE_INT mask;
5362 if (n_elts == HOST_BITS_PER_WIDE_INT)
5363 mask = -1;
5364 else
5365 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5366
5367 if (!(sel & mask) && !side_effects_p (op0))
5368 return op1;
5369 if ((sel & mask) == mask && !side_effects_p (op1))
5370 return op0;
5371
5372 rtx trueop0 = avoid_constant_pool_reference (op0);
5373 rtx trueop1 = avoid_constant_pool_reference (op1);
5374 if (GET_CODE (trueop0) == CONST_VECTOR
5375 && GET_CODE (trueop1) == CONST_VECTOR)
5376 {
5377 rtvec v = rtvec_alloc (n_elts);
5378 unsigned int i;
5379
5380 for (i = 0; i < n_elts; i++)
5381 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5382 ? CONST_VECTOR_ELT (trueop0, i)
5383 : CONST_VECTOR_ELT (trueop1, i));
5384 return gen_rtx_CONST_VECTOR (mode, v);
5385 }
5386
5387 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5388 if no element from a appears in the result. */
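/* E.g. with four lanes, an inner selector of 0x3 and an outer selector
   of 0xc: every lane the outer merge takes from the inner one comes
   from b, so the result is (vec_merge b c (const_int 12))
   (an assumed illustration).  */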
5389 if (GET_CODE (op0) == VEC_MERGE)
5390 {
5391 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5392 if (CONST_INT_P (tem))
5393 {
5394 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5395 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5396 return simplify_gen_ternary (code, mode, mode,
5397 XEXP (op0, 1), op1, op2);
5398 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5399 return simplify_gen_ternary (code, mode, mode,
5400 XEXP (op0, 0), op1, op2);
5401 }
5402 }
5403 if (GET_CODE (op1) == VEC_MERGE)
5404 {
5405 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5406 if (CONST_INT_P (tem))
5407 {
5408 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5409 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5410 return simplify_gen_ternary (code, mode, mode,
5411 op0, XEXP (op1, 1), op2);
5412 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5413 return simplify_gen_ternary (code, mode, mode,
5414 op0, XEXP (op1, 0), op2);
5415 }
5416 }
5417
5418 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5419 with a. */
5420 if (GET_CODE (op0) == VEC_DUPLICATE
5421 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5422 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5423 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5424 {
5425 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5426 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5427 {
5428 if (XEXP (XEXP (op0, 0), 0) == op1
5429 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5430 return op1;
5431 }
5432 }
5433 }
5434
5435 if (rtx_equal_p (op0, op1)
5436 && !side_effects_p (op2) && !side_effects_p (op1))
5437 return op0;
5438
5439 break;
5440
5441 default:
5442 gcc_unreachable ();
5443 }
5444
5445 return 0;
5446 }
5447
5448 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5449 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5450 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5451
5452 Works by unpacking OP into a collection of 8-bit values
5453 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5454 and then repacking them again for OUTERMODE. */
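/* A minimal illustration, assuming a little-endian target: evaluating a
   QImode subreg at byte 0 of (const_int 0x1234) in HImode unpacks the
   value as the byte array {0x34, 0x12}, selects byte 0 and repacks it,
   giving (const_int 0x34). */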
5455
5456 static rtx
5457 simplify_immed_subreg (machine_mode outermode, rtx op,
5458 machine_mode innermode, unsigned int byte)
5459 {
5460 enum {
5461 value_bit = 8,
5462 value_mask = (1 << value_bit) - 1
5463 };
5464 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5465 int value_start;
5466 int i;
5467 int elem;
5468
5469 int num_elem;
5470 rtx * elems;
5471 int elem_bitsize;
5472 rtx result_s;
5473 rtvec result_v = NULL;
5474 enum mode_class outer_class;
5475 machine_mode outer_submode;
5476 int max_bitsize;
5477
5478 /* Some ports misuse CCmode. */
5479 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5480 return op;
5481
5482 /* We have no way to represent a complex constant at the rtl level. */
5483 if (COMPLEX_MODE_P (outermode))
5484 return NULL_RTX;
5485
5486 /* We support any size mode. */
5487 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5488 GET_MODE_BITSIZE (innermode));
5489
5490 /* Unpack the value. */
5491
5492 if (GET_CODE (op) == CONST_VECTOR)
5493 {
5494 num_elem = CONST_VECTOR_NUNITS (op);
5495 elems = &CONST_VECTOR_ELT (op, 0);
5496 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5497 }
5498 else
5499 {
5500 num_elem = 1;
5501 elems = &op;
5502 elem_bitsize = max_bitsize;
5503 }
5504 /* If this asserts, it is too complicated; reducing value_bit may help. */
5505 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5506 /* I don't know how to handle endianness of sub-units. */
5507 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5508
5509 for (elem = 0; elem < num_elem; elem++)
5510 {
5511 unsigned char * vp;
5512 rtx el = elems[elem];
5513
5514 /* Vectors are kept in target memory order. (This is probably
5515 a mistake.) */
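/* E.g. for a V2SImode constant on a target with 4-byte words and both
   WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN set, element 0 is written to
   bytes 4..7 of VALUE, the most significant half of this little-endian
   array, matching its place at the lowest memory address. */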
5516 {
5517 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5518 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5519 / BITS_PER_UNIT);
5520 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5521 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5522 unsigned bytele = (subword_byte % UNITS_PER_WORD
5523 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5524 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5525 }
5526
5527 switch (GET_CODE (el))
5528 {
5529 case CONST_INT:
5530 for (i = 0;
5531 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5532 i += value_bit)
5533 *vp++ = INTVAL (el) >> i;
5534 /* CONST_INTs are always logically sign-extended. */
5535 for (; i < elem_bitsize; i += value_bit)
5536 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5537 break;
5538
5539 case CONST_WIDE_INT:
5540 {
5541 rtx_mode_t val = std::make_pair (el, innermode);
5542 unsigned char extend = wi::sign_mask (val);
5543
5544 for (i = 0; i < elem_bitsize; i += value_bit)
5545 *vp++ = wi::extract_uhwi (val, i, value_bit);
5546 for (; i < elem_bitsize; i += value_bit)
5547 *vp++ = extend;
5548 }
5549 break;
5550
5551 case CONST_DOUBLE:
5552 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5553 {
5554 unsigned char extend = 0;
5555 /* If this triggers, someone should have generated a
5556 CONST_INT instead. */
5557 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5558
5559 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5560 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5561 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5562 {
5563 *vp++
5564 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5565 i += value_bit;
5566 }
5567
5568 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5569 extend = -1;
5570 for (; i < elem_bitsize; i += value_bit)
5571 *vp++ = extend;
5572 }
5573 else
5574 {
5575 /* This is big enough for anything on the platform. */
5576 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5577 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5578
5579 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5580 gcc_assert (bitsize <= elem_bitsize);
5581 gcc_assert (bitsize % value_bit == 0);
5582
5583 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5584 GET_MODE (el));
5585
5586 /* real_to_target produces its result in words affected by
5587 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5588 and use WORDS_BIG_ENDIAN instead; see the documentation
5589 of SUBREG in rtl.texi. */
5590 for (i = 0; i < bitsize; i += value_bit)
5591 {
5592 int ibase;
5593 if (WORDS_BIG_ENDIAN)
5594 ibase = bitsize - 1 - i;
5595 else
5596 ibase = i;
5597 *vp++ = tmp[ibase / 32] >> i % 32;
5598 }
5599
5600 /* It shouldn't matter what's done here, so fill it with
5601 zero. */
5602 for (; i < elem_bitsize; i += value_bit)
5603 *vp++ = 0;
5604 }
5605 break;
5606
5607 case CONST_FIXED:
5608 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5609 {
5610 for (i = 0; i < elem_bitsize; i += value_bit)
5611 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5612 }
5613 else
5614 {
5615 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5616 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5617 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5618 i += value_bit)
5619 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5620 >> (i - HOST_BITS_PER_WIDE_INT);
5621 for (; i < elem_bitsize; i += value_bit)
5622 *vp++ = 0;
5623 }
5624 break;
5625
5626 default:
5627 gcc_unreachable ();
5628 }
5629 }
5630
5631 /* Now, pick the right byte to start with. */
5632 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5633 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5634 will already have offset 0. */
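/* For instance, with a 4-byte INNERMODE, a 2-byte OUTERMODE and both
   WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN set, SUBREG_BYTE 0 names the most
   significant half, and after this adjustment BYTE becomes 2, its offset
   counted from the least significant end. */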
5635 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5636 {
5637 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5638 - byte);
5639 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5640 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5641 byte = (subword_byte % UNITS_PER_WORD
5642 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5643 }
5644
5645 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5646 so if it's become negative it will instead be very large.) */
5647 gcc_assert (byte < GET_MODE_SIZE (innermode));
5648
5649 /* Convert from bytes to chunks of size value_bit. */
5650 value_start = byte * (BITS_PER_UNIT / value_bit);
5651
5652 /* Re-pack the value. */
5653
5654 if (VECTOR_MODE_P (outermode))
5655 {
5656 num_elem = GET_MODE_NUNITS (outermode);
5657 result_v = rtvec_alloc (num_elem);
5658 elems = &RTVEC_ELT (result_v, 0);
5659 outer_submode = GET_MODE_INNER (outermode);
5660 }
5661 else
5662 {
5663 num_elem = 1;
5664 elems = &result_s;
5665 outer_submode = outermode;
5666 }
5667
5668 outer_class = GET_MODE_CLASS (outer_submode);
5669 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5670
5671 gcc_assert (elem_bitsize % value_bit == 0);
5672 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5673
5674 for (elem = 0; elem < num_elem; elem++)
5675 {
5676 unsigned char *vp;
5677
5678 /* Vectors are stored in target memory order. (This is probably
5679 a mistake.) */
5680 {
5681 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5682 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5683 / BITS_PER_UNIT);
5684 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5685 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5686 unsigned bytele = (subword_byte % UNITS_PER_WORD
5687 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5688 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5689 }
5690
5691 switch (outer_class)
5692 {
5693 case MODE_INT:
5694 case MODE_PARTIAL_INT:
5695 {
5696 int u;
5697 int base = 0;
5698 int units
5699 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5700 / HOST_BITS_PER_WIDE_INT;
5701 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5702 wide_int r;
5703
5704 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5705 return NULL_RTX;
5706 for (u = 0; u < units; u++)
5707 {
5708 unsigned HOST_WIDE_INT buf = 0;
5709 for (i = 0;
5710 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5711 i += value_bit)
5712 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5713
5714 tmp[u] = buf;
5715 base += HOST_BITS_PER_WIDE_INT;
5716 }
5717 r = wide_int::from_array (tmp, units,
5718 GET_MODE_PRECISION (outer_submode));
5719 #if TARGET_SUPPORTS_WIDE_INT == 0
5720 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5721 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5722 return NULL_RTX;
5723 #endif
5724 elems[elem] = immed_wide_int_const (r, outer_submode);
5725 }
5726 break;
5727
5728 case MODE_FLOAT:
5729 case MODE_DECIMAL_FLOAT:
5730 {
5731 REAL_VALUE_TYPE r;
5732 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5733
5734 /* real_from_target wants its input in words affected by
5735 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5736 and use WORDS_BIG_ENDIAN instead; see the documentation
5737 of SUBREG in rtl.texi. */
5738 for (i = 0; i < max_bitsize / 32; i++)
5739 tmp[i] = 0;
5740 for (i = 0; i < elem_bitsize; i += value_bit)
5741 {
5742 int ibase;
5743 if (WORDS_BIG_ENDIAN)
5744 ibase = elem_bitsize - 1 - i;
5745 else
5746 ibase = i;
5747 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5748 }
5749
5750 real_from_target (&r, tmp, outer_submode);
5751 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5752 }
5753 break;
5754
5755 case MODE_FRACT:
5756 case MODE_UFRACT:
5757 case MODE_ACCUM:
5758 case MODE_UACCUM:
5759 {
5760 FIXED_VALUE_TYPE f;
5761 f.data.low = 0;
5762 f.data.high = 0;
5763 f.mode = outer_submode;
5764
5765 for (i = 0;
5766 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5767 i += value_bit)
5768 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5769 for (; i < elem_bitsize; i += value_bit)
5770 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5771 << (i - HOST_BITS_PER_WIDE_INT));
5772
5773 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5774 }
5775 break;
5776
5777 default:
5778 gcc_unreachable ();
5779 }
5780 }
5781 if (VECTOR_MODE_P (outermode))
5782 return gen_rtx_CONST_VECTOR (outermode, result_v);
5783 else
5784 return result_s;
5785 }
5786
5787 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5788 Return 0 if no simplifications are possible. */
5789 rtx
5790 simplify_subreg (machine_mode outermode, rtx op,
5791 machine_mode innermode, unsigned int byte)
5792 {
5793 /* Little bit of sanity checking. */
5794 gcc_assert (innermode != VOIDmode);
5795 gcc_assert (outermode != VOIDmode);
5796 gcc_assert (innermode != BLKmode);
5797 gcc_assert (outermode != BLKmode);
5798
5799 gcc_assert (GET_MODE (op) == innermode
5800 || GET_MODE (op) == VOIDmode);
5801
5802 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5803 return NULL_RTX;
5804
5805 if (byte >= GET_MODE_SIZE (innermode))
5806 return NULL_RTX;
5807
5808 if (outermode == innermode && !byte)
5809 return op;
5810
5811 if (CONST_SCALAR_INT_P (op)
5812 || CONST_DOUBLE_AS_FLOAT_P (op)
5813 || GET_CODE (op) == CONST_FIXED
5814 || GET_CODE (op) == CONST_VECTOR)
5815 return simplify_immed_subreg (outermode, op, innermode, byte);
5816
5817 /* Changing mode twice with SUBREG => just change it once,
5818 or not at all if changing back to OP's starting mode. */
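/* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) can become
   (subreg:QI (reg:SI R) 0) on a little-endian target, while
   (subreg:SI (subreg:HI (reg:SI R) 0) 0) collapses to (reg:SI R). */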
5819 if (GET_CODE (op) == SUBREG)
5820 {
5821 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5822 int final_offset = byte + SUBREG_BYTE (op);
5823 rtx newx;
5824
5825 if (outermode == innermostmode
5826 && byte == 0 && SUBREG_BYTE (op) == 0)
5827 return SUBREG_REG (op);
5828
5829 /* The SUBREG_BYTE represents the offset, as if the value were stored
5830 in memory. An irritating exception is a paradoxical subreg, where
5831 we define SUBREG_BYTE to be 0; on big-endian machines the true
5832 value would be negative. For a moment, undo this exception. */
5833 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5834 {
5835 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5836 if (WORDS_BIG_ENDIAN)
5837 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5838 if (BYTES_BIG_ENDIAN)
5839 final_offset += difference % UNITS_PER_WORD;
5840 }
5841 if (SUBREG_BYTE (op) == 0
5842 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5843 {
5844 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5845 if (WORDS_BIG_ENDIAN)
5846 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5847 if (BYTES_BIG_ENDIAN)
5848 final_offset += difference % UNITS_PER_WORD;
5849 }
5850
5851 /* See whether resulting subreg will be paradoxical. */
5852 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5853 {
5854 /* In nonparadoxical subregs we can't handle negative offsets. */
5855 if (final_offset < 0)
5856 return NULL_RTX;
5857 /* Bail out in case resulting subreg would be incorrect. */
5858 if (final_offset % GET_MODE_SIZE (outermode)
5859 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5860 return NULL_RTX;
5861 }
5862 else
5863 {
5864 int offset = 0;
5865 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5866
5867 /* In a paradoxical subreg, see if we are still looking at the lower
5868 part. If so, our SUBREG_BYTE will be 0. */
5869 if (WORDS_BIG_ENDIAN)
5870 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5871 if (BYTES_BIG_ENDIAN)
5872 offset += difference % UNITS_PER_WORD;
5873 if (offset == final_offset)
5874 final_offset = 0;
5875 else
5876 return NULL_RTX;
5877 }
5878
5879 /* Recurse for further possible simplifications. */
5880 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5881 final_offset);
5882 if (newx)
5883 return newx;
5884 if (validate_subreg (outermode, innermostmode,
5885 SUBREG_REG (op), final_offset))
5886 {
5887 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5888 if (SUBREG_PROMOTED_VAR_P (op)
5889 && SUBREG_PROMOTED_SIGN (op) >= 0
5890 && GET_MODE_CLASS (outermode) == MODE_INT
5891 && IN_RANGE (GET_MODE_SIZE (outermode),
5892 GET_MODE_SIZE (innermode),
5893 GET_MODE_SIZE (innermostmode))
5894 && subreg_lowpart_p (newx))
5895 {
5896 SUBREG_PROMOTED_VAR_P (newx) = 1;
5897 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5898 }
5899 return newx;
5900 }
5901 return NULL_RTX;
5902 }
5903
5904 /* SUBREG of a hard register => just change the register number
5905 and/or mode. If the hard register is not valid in that mode,
5906 suppress this simplification. If the hard register is the stack,
5907 frame, or argument pointer, leave this as a SUBREG. */
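/* For instance, (subreg:SI (reg:DI 0) 4) can typically be rewritten as
   a plain SImode hard register when simplify_subreg_regno reports a
   valid register number for that mode and offset. */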
5908
5909 if (REG_P (op) && HARD_REGISTER_P (op))
5910 {
5911 unsigned int regno, final_regno;
5912
5913 regno = REGNO (op);
5914 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5915 if (HARD_REGISTER_NUM_P (final_regno))
5916 {
5917 rtx x;
5918 int final_offset = byte;
5919
5920 /* Adjust offset for paradoxical subregs. */
5921 if (byte == 0
5922 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5923 {
5924 int difference = (GET_MODE_SIZE (innermode)
5925 - GET_MODE_SIZE (outermode));
5926 if (WORDS_BIG_ENDIAN)
5927 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5928 if (BYTES_BIG_ENDIAN)
5929 final_offset += difference % UNITS_PER_WORD;
5930 }
5931
5932 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5933
5934 /* Propagate the original regno. We don't have any way to specify
5935 an offset inside the original regno, so do so only for the lowpart.
5936 The information is used only by alias analysis, which cannot
5937 grok a partial register anyway. */
5938
5939 if (subreg_lowpart_offset (outermode, innermode) == byte)
5940 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5941 return x;
5942 }
5943 }
5944
5945 /* If we have a SUBREG of a register that we are replacing and we are
5946 replacing it with a MEM, make a new MEM and try replacing the
5947 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5948 or if we would be widening it. */
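/* E.g. (subreg:SI (mem:DI A) 4) can become (mem:SI (plus A (const_int 4)))
   via adjust_address_nv, when the address is not mode-dependent and the
   reference is not being widened. */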
5949
5950 if (MEM_P (op)
5951 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5952 /* Allow splitting of volatile memory references in case we don't
5953 have an instruction to move the whole thing. */
5954 && (! MEM_VOLATILE_P (op)
5955 || ! have_insn_for (SET, innermode))
5956 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5957 return adjust_address_nv (op, outermode, byte);
5958
5959 /* Handle complex values represented as CONCAT
5960 of real and imaginary part. */
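/* E.g. (subreg:SF (concat:SC R I) 4) selects the imaginary part I
   outright, SFmode being 4 bytes wide. */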
5961 if (GET_CODE (op) == CONCAT)
5962 {
5963 unsigned int part_size, final_offset;
5964 rtx part, res;
5965
5966 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5967 if (byte < part_size)
5968 {
5969 part = XEXP (op, 0);
5970 final_offset = byte;
5971 }
5972 else
5973 {
5974 part = XEXP (op, 1);
5975 final_offset = byte - part_size;
5976 }
5977
5978 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5979 return NULL_RTX;
5980
5981 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5982 if (res)
5983 return res;
5984 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5985 return gen_rtx_SUBREG (outermode, part, final_offset);
5986 return NULL_RTX;
5987 }
5988
5989 /* A SUBREG resulting from a zero extension may fold to zero if
5990 it extracts higher bits than the ZERO_EXTEND's source bits. */
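/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI R)) 4)
   reads only bits that the extension is known to clear, so it folds to
   (const_int 0). */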
5991 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5992 {
5993 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5994 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5995 return CONST0_RTX (outermode);
5996 }
5997
5998 if (SCALAR_INT_MODE_P (outermode)
5999 && SCALAR_INT_MODE_P (innermode)
6000 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6001 && byte == subreg_lowpart_offset (outermode, innermode))
6002 {
6003 rtx tem = simplify_truncation (outermode, op, innermode);
6004 if (tem)
6005 return tem;
6006 }
6007
6008 return NULL_RTX;
6009 }
6010
6011 /* Make a SUBREG operation or equivalent if it folds. */
6012
6013 rtx
6014 simplify_gen_subreg (machine_mode outermode, rtx op,
6015 machine_mode innermode, unsigned int byte)
6016 {
6017 rtx newx;
6018
6019 newx = simplify_subreg (outermode, op, innermode, byte);
6020 if (newx)
6021 return newx;
6022
6023 if (GET_CODE (op) == SUBREG
6024 || GET_CODE (op) == CONCAT
6025 || GET_MODE (op) == VOIDmode)
6026 return NULL_RTX;
6027
6028 if (validate_subreg (outermode, innermode, op, byte))
6029 return gen_rtx_SUBREG (outermode, op, byte);
6030
6031 return NULL_RTX;
6032 }
6033
6034 /* Simplify X, an rtx expression.
6035
6036 Return the simplified expression or NULL if no simplifications
6037 were possible.
6038
6039 This is the preferred entry point into the simplification routines;
6040 however, we still allow passes to call the more specific routines.
6041
6042 Right now GCC has three (yes, three) major bodies of RTL simplification
6043 code that need to be unified.
6044
6045 1. fold_rtx in cse.c. This code uses various CSE specific
6046 information to aid in RTL simplification.
6047
6048 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6049 it uses combine specific information to aid in RTL
6050 simplification.
6051
6052 3. The routines in this file.
6053
6054
6055 Long term we want to only have one body of simplification code; to
6056 get to that state I recommend the following steps:
6057
6058 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6059 which do not depend on pass-specific state into these routines.
6060
6061 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6062 use this routine whenever possible.
6063
6064 3. Allow for pass dependent state to be provided to these
6065 routines and add simplifications based on the pass dependent
6066 state. Remove code from cse.c & combine.c that becomes
6067 redundant/dead.
6068
6069 It will take time, but ultimately the compiler will be easier to
6070 maintain and improve. It's totally silly that when we add a
6071 simplification it needs to be added to 4 places (3 for RTL
6072 simplification and 1 for tree simplification). */
6073
6074 rtx
6075 simplify_rtx (const_rtx x)
6076 {
6077 const enum rtx_code code = GET_CODE (x);
6078 const machine_mode mode = GET_MODE (x);
6079
6080 switch (GET_RTX_CLASS (code))
6081 {
6082 case RTX_UNARY:
6083 return simplify_unary_operation (code, mode,
6084 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6085 case RTX_COMM_ARITH:
6086 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6087 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6088
6089 /* Fall through.... */
6090
6091 case RTX_BIN_ARITH:
6092 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6093
6094 case RTX_TERNARY:
6095 case RTX_BITFIELD_OPS:
6096 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6097 XEXP (x, 0), XEXP (x, 1),
6098 XEXP (x, 2));
6099
6100 case RTX_COMPARE:
6101 case RTX_COMM_COMPARE:
6102 return simplify_relational_operation (code, mode,
6103 ((GET_MODE (XEXP (x, 0))
6104 != VOIDmode)
6105 ? GET_MODE (XEXP (x, 0))
6106 : GET_MODE (XEXP (x, 1))),
6107 XEXP (x, 0),
6108 XEXP (x, 1));
6109
6110 case RTX_EXTRA:
6111 if (code == SUBREG)
6112 return simplify_subreg (mode, SUBREG_REG (x),
6113 GET_MODE (SUBREG_REG (x)),
6114 SUBREG_BYTE (x));
6115 break;
6116
6117 case RTX_OBJ:
6118 if (code == LO_SUM)
6119 {
6120 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6121 if (GET_CODE (XEXP (x, 0)) == HIGH
6122 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6123 return XEXP (x, 1);
6124 }
6125 break;
6126
6127 default:
6128 break;
6129 }
6130 return NULL;
6131 }