poly_int: GET_MODE_SIZE
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
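/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (0x8000000000000000)
   yields HOST_WIDE_INT_M1 (an all-ones high word), while HWI_SIGN_EXTEND (3)
   yields HOST_WIDE_INT_0, mirroring how a signed low word would sign-extend.  */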
47
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
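/* Note: the HWI_COMPUTABLE_MODE_P fallback above presumably guards the case
   where -UINTVAL (i) wraps back to the same value (0 or the minimum
   HOST_WIDE_INT); for a mode wider than a HOST_WIDE_INT the true negation
   then needs the full constant folder rather than gen_int_mode.  */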
71
72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
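/* For example, for SImode this holds only for the constant whose low 32 bits
   are 0x80000000, i.e. (const_int -2147483648) once sign-extended into a
   CONST_INT, and for no other value.  */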
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
124
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
145
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163 }
164
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182 }
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
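/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx) is expected
   to fold straight to X, and a commutative operation gets its constant operand
   placed second before the raw rtx is built.  */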
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
205 \f
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
267 {
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
271 }
272 }
273
274 return x;
275 }
276 \f
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 poly_int64 offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
314
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
319 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
320 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
321 decl = NULL;
322 else
323 offset += bytepos + toffset_val;
324 break;
325 }
326 }
327
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
335 {
336 rtx newx;
337
338 offset += MEM_OFFSET (x);
339
340 newx = DECL_RTL (decl);
341
342 if (MEM_P (newx))
343 {
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 poly_int64 n_offset, o_offset;
346
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do so if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n = strip_offset (n, &n_offset);
354 o = strip_offset (o, &o_offset);
355 if (!(known_eq (o_offset, n_offset + offset)
356 && rtx_equal_p (o, n)))
357 x = adjust_address_nv (newx, mode, offset);
358 }
359 else if (GET_MODE (x) == GET_MODE (newx)
360 && known_eq (offset, 0))
361 x = newx;
362 }
363 }
364
365 return x;
366 }
367 \f
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
370
371 rtx
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 machine_mode op_mode)
374 {
375 rtx tem;
376
377 /* If this simplifies, use it. */
378 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379 return tem;
380
381 return gen_rtx_fmt_e (code, mode, op);
382 }
383
384 /* Likewise for ternary operations. */
385
386 rtx
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
389 {
390 rtx tem;
391
392 /* If this simplifies, use it. */
393 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
394 op0, op1, op2)) != 0)
395 return tem;
396
397 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
398 }
399
400 /* Likewise, for relational operations.
401 CMP_MODE specifies the mode in which the comparison is done. */
402
403 rtx
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 machine_mode cmp_mode, rtx op0, rtx op1)
406 {
407 rtx tem;
408
409 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
410 op0, op1)) != 0)
411 return tem;
412
413 return gen_rtx_fmt_ee (code, mode, op0, op1);
414 }
415 \f
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X; if it returns non-NULL, replace X with its return value and simplify the
419 result. */
420
421 rtx
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 rtx (*fn) (rtx, const_rtx, void *), void *data)
424 {
425 enum rtx_code code = GET_CODE (x);
426 machine_mode mode = GET_MODE (x);
427 machine_mode op_mode;
428 const char *fmt;
429 rtx op0, op1, op2, newx, op;
430 rtvec vec, newvec;
431 int i, j;
432
433 if (__builtin_expect (fn != NULL, 0))
434 {
435 newx = fn (x, old_rtx, data);
436 if (newx)
437 return newx;
438 }
439 else if (rtx_equal_p (x, old_rtx))
440 return copy_rtx ((rtx) data);
441
442 switch (GET_RTX_CLASS (code))
443 {
444 case RTX_UNARY:
445 op0 = XEXP (x, 0);
446 op_mode = GET_MODE (op0);
447 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448 if (op0 == XEXP (x, 0))
449 return x;
450 return simplify_gen_unary (code, mode, op0, op_mode);
451
452 case RTX_BIN_ARITH:
453 case RTX_COMM_ARITH:
454 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_binary (code, mode, op0, op1);
459
460 case RTX_COMPARE:
461 case RTX_COMM_COMPARE:
462 op0 = XEXP (x, 0);
463 op1 = XEXP (x, 1);
464 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 return x;
469 return simplify_gen_relational (code, mode, op_mode, op0, op1);
470
471 case RTX_TERNARY:
472 case RTX_BITFIELD_OPS:
473 op0 = XEXP (x, 0);
474 op_mode = GET_MODE (op0);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 return x;
480 if (op_mode == VOIDmode)
481 op_mode = GET_MODE (op0);
482 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
483
484 case RTX_EXTRA:
485 if (code == SUBREG)
486 {
487 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 if (op0 == SUBREG_REG (x))
489 return x;
490 op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 GET_MODE (SUBREG_REG (x)),
492 SUBREG_BYTE (x));
493 return op0 ? op0 : x;
494 }
495 break;
496
497 case RTX_OBJ:
498 if (code == MEM)
499 {
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0))
502 return x;
503 return replace_equiv_address_nv (x, op0);
504 }
505 else if (code == LO_SUM)
506 {
507 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
509
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0) == HIGH)
512 {
513 rtx base0, base1, offset0, offset1;
514 split_const (XEXP (op0, 0), &base0, &offset0);
515 split_const (op1, &base1, &offset1);
516 if (rtx_equal_p (base0, base1))
517 return op1;
518 }
519
520 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 return x;
522 return gen_rtx_LO_SUM (mode, op0, op1);
523 }
524 break;
525
526 default:
527 break;
528 }
529
530 newx = x;
531 fmt = GET_RTX_FORMAT (code);
532 for (i = 0; fmt[i]; i++)
533 switch (fmt[i])
534 {
535 case 'E':
536 vec = XVEC (x, i);
537 newvec = XVEC (newx, i);
538 for (j = 0; j < GET_NUM_ELEM (vec); j++)
539 {
540 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 old_rtx, fn, data);
542 if (op != RTVEC_ELT (vec, j))
543 {
544 if (newvec == vec)
545 {
546 newvec = shallow_copy_rtvec (vec);
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XVEC (newx, i) = newvec;
550 }
551 RTVEC_ELT (newvec, j) = op;
552 }
553 }
554 break;
555
556 case 'e':
557 if (XEXP (x, i))
558 {
559 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 if (op != XEXP (x, i))
561 {
562 if (x == newx)
563 newx = shallow_copy_rtx (x);
564 XEXP (newx, i) = op;
565 }
566 }
567 break;
568 }
569 return newx;
570 }
571
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
574
575 rtx
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
577 {
578 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
579 }
580 \f
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
583
584 RTL provides two ways of truncating a value:
585
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
589 an rvalue.
590
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
596
597 2. a TRUNCATE. This form handles both scalar and compound integers.
598
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
603
604 simplify_gen_unary (TRUNCATE, ...)
605
606 and leave simplify_unary_operation to work out which representation
607 should be used.
608
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
613 truncation of:
614
615 (and:DI X Y)
616
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
621
622 (and:DI (reg:DI X) (const_int 63))
623
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
627
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 machine_mode op_mode)
631 {
632 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634 scalar_int_mode int_mode, int_op_mode, subreg_mode;
635
636 gcc_assert (precision <= op_precision);
637
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
641 {
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
657 }
658
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
662 if ((!WORD_REGISTER_OPERATIONS
663 || precision >= BITS_PER_WORD)
664 && (GET_CODE (op) == PLUS
665 || GET_CODE (op) == MINUS
666 || GET_CODE (op) == MULT))
667 {
668 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
669 if (op0)
670 {
671 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
672 if (op1)
673 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
674 }
675 }
676
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op) == LSHIFTRT
681 || GET_CODE (op) == ASHIFTRT)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision <= op_precision
687 && CONST_INT_P (XEXP (op, 1))
688 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
690 && UINTVAL (XEXP (op, 1)) < precision)
691 return simplify_gen_binary (ASHIFTRT, mode,
692 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op) == LSHIFTRT
698 || GET_CODE (op) == ASHIFTRT)
699 && CONST_INT_P (XEXP (op, 1))
700 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702 && UINTVAL (XEXP (op, 1)) < precision)
703 return simplify_gen_binary (LSHIFTRT, mode,
704 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op) == ASHIFT
710 && CONST_INT_P (XEXP (op, 1))
711 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714 && UINTVAL (XEXP (op, 1)) < precision)
715 return simplify_gen_binary (ASHIFT, mode,
716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
720 and C2. */
721 if (GET_CODE (op) == AND
722 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
724 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
725 && CONST_INT_P (XEXP (op, 1)))
726 {
727 rtx op0 = (XEXP (XEXP (op, 0), 0));
728 rtx shift_op = XEXP (XEXP (op, 0), 1);
729 rtx mask_op = XEXP (op, 1);
730 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
731 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732
733 if (shift < precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode) >> shift) & mask)
737 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
738 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
739 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 {
741 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
742 return simplify_gen_binary (AND, mode, op0, mask_op);
743 }
744 }
745
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
748 changing len. */
749 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
750 && REG_P (XEXP (op, 0))
751 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
752 && CONST_INT_P (XEXP (op, 1))
753 && CONST_INT_P (XEXP (op, 2)))
754 {
755 rtx op0 = XEXP (op, 0);
756 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
757 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
758 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
759 {
760 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
761 if (op0)
762 {
763 pos -= op_precision - precision;
764 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
765 XEXP (op, 1), GEN_INT (pos));
766 }
767 }
768 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
769 {
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 XEXP (op, 1), XEXP (op, 2));
774 }
775 }
776
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op) == LSHIFTRT
779 || GET_CODE (op) == ASHIFTRT)
780 && SCALAR_INT_MODE_P (mode)
781 && SCALAR_INT_MODE_P (op_mode)
782 && precision >= BITS_PER_WORD
783 && 2 * precision <= op_precision
784 && CONST_INT_P (XEXP (op, 1))
785 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
786 && UINTVAL (XEXP (op, 1)) < op_precision)
787 {
788 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
789 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
790 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
791 (WORDS_BIG_ENDIAN
792 ? byte - shifted_bytes
793 : byte + shifted_bytes));
794 }
795
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op) == LSHIFTRT
800 || GET_CODE (op) == ASHIFTRT)
801 && is_a <scalar_int_mode> (mode, &int_mode)
802 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
803 && MEM_P (XEXP (op, 0))
804 && CONST_INT_P (XEXP (op, 1))
805 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
806 && INTVAL (XEXP (op, 1)) > 0
807 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
808 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op, 0))
811 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
813 {
814 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
815 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
816 return adjust_address_nv (XEXP (op, 0), int_mode,
817 (WORDS_BIG_ENDIAN
818 ? byte - shifted_bytes
819 : byte + shifted_bytes));
820 }
821
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op) == ABS
825 || GET_CODE (op) == NEG)
826 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
828 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
829 return simplify_gen_unary (GET_CODE (op), mode,
830 XEXP (XEXP (op, 0), 0), mode);
831
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
833 (truncate:A X). */
834 if (GET_CODE (op) == SUBREG
835 && is_a <scalar_int_mode> (mode, &int_mode)
836 && SCALAR_INT_MODE_P (op_mode)
837 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
838 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
839 && subreg_lowpart_p (op))
840 {
841 rtx inner = XEXP (SUBREG_REG (op), 0);
842 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
843 return simplify_gen_unary (TRUNCATE, int_mode, inner,
844 GET_MODE (inner));
845 else
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
849 }
850
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op) == TRUNCATE)
853 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
854 GET_MODE (XEXP (op, 0)));
855
856 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
857 in mode A. */
858 if (GET_CODE (op) == IOR
859 && SCALAR_INT_MODE_P (mode)
860 && SCALAR_INT_MODE_P (op_mode)
861 && CONST_INT_P (XEXP (op, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
863 return constm1_rtx;
864
865 return NULL_RTX;
866 }
867 \f
868 /* Try to simplify a unary operation CODE whose output mode is to be
869 MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if no simplification can be made. */
871 rtx
872 simplify_unary_operation (enum rtx_code code, machine_mode mode,
873 rtx op, machine_mode op_mode)
874 {
875 rtx trueop, tem;
876
877 trueop = avoid_constant_pool_reference (op);
878
879 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
880 if (tem)
881 return tem;
882
883 return simplify_unary_operation_1 (code, mode, op);
884 }
885
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
887 to be exact. */
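/* For example, IEEE single precision (SFmode) has a 24-bit significand, so a
   FLOAT:SF of an SImode value known to fit in 24 bits is reported as exact,
   while an arbitrary 32-bit value is not.  */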
888
889 static bool
890 exact_int_to_float_conversion_p (const_rtx op)
891 {
892 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
893 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
894 /* Constants shouldn't reach here. */
895 gcc_assert (op0_mode != VOIDmode);
896 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
897 int in_bits = in_prec;
898 if (HWI_COMPUTABLE_MODE_P (op0_mode))
899 {
900 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
901 if (GET_CODE (op) == FLOAT)
902 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
903 else if (GET_CODE (op) == UNSIGNED_FLOAT)
904 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
905 else
906 gcc_unreachable ();
907 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
908 }
909 return in_bits <= out_bits;
910 }
911
912 /* Perform some simplifications we can do even if the operands
913 aren't constant. */
914 static rtx
915 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
916 {
917 enum rtx_code reversed;
918 rtx temp, elt, base, step;
919 scalar_int_mode inner, int_mode, op_mode, op0_mode;
920
921 switch (code)
922 {
923 case NOT:
924 /* (not (not X)) == X. */
925 if (GET_CODE (op) == NOT)
926 return XEXP (op, 0);
927
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op)
931 && (mode == BImode || STORE_FLAG_VALUE == -1)
932 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
933 return simplify_gen_relational (reversed, mode, VOIDmode,
934 XEXP (op, 0), XEXP (op, 1));
935
936 /* (not (plus X -1)) can become (neg X). */
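/* (In two's complement -X == ~X + 1, so ~(X - 1) == -X.)  */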
937 if (GET_CODE (op) == PLUS
938 && XEXP (op, 1) == constm1_rtx)
939 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
940
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
945 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
946 CONSTM1_RTX (mode));
947
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op) == XOR
950 && CONST_INT_P (XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
954
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op) == PLUS
957 && CONST_INT_P (XEXP (op, 1))
958 && mode_signbit_p (mode, XEXP (op, 1))
959 && (temp = simplify_unary_operation (NOT, mode,
960 XEXP (op, 1), mode)) != 0)
961 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
962
963
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
968 bother with. */
969 if (GET_CODE (op) == ASHIFT
970 && XEXP (op, 0) == const1_rtx)
971 {
972 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
973 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
974 }
975
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE == -1
980 && is_a <scalar_int_mode> (mode, &int_mode)
981 && GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
984 return simplify_gen_relational (GE, int_mode, VOIDmode,
985 XEXP (op, 0), const0_rtx);
986
987
988 if (partial_subreg_p (op)
989 && subreg_lowpart_p (op)
990 && GET_CODE (SUBREG_REG (op)) == ASHIFT
991 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
992 {
993 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
994 rtx x;
995
996 x = gen_rtx_ROTATE (inner_mode,
997 simplify_gen_unary (NOT, inner_mode, const1_rtx,
998 inner_mode),
999 XEXP (SUBREG_REG (op), 1));
1000 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1001 if (temp)
1002 return temp;
1003 }
1004
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1008 coded. */
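/* For example, (not:SI (and:SI X Y)) becomes (ior:SI (not:SI X) (not:SI Y)),
   and (not:SI (ior:SI X Y)) becomes (and:SI (not:SI X) (not:SI Y)).  */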
1009 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1010 {
1011 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1012 machine_mode op_mode;
1013
1014 op_mode = GET_MODE (in1);
1015 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1016
1017 op_mode = GET_MODE (in2);
1018 if (op_mode == VOIDmode)
1019 op_mode = mode;
1020 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1021
1022 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1023 std::swap (in1, in2);
1024
1025 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1026 mode, in1, in2);
1027 }
1028
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op) == BSWAP)
1031 {
1032 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1033 return simplify_gen_unary (BSWAP, mode, x, mode);
1034 }
1035 break;
1036
1037 case NEG:
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op) == NEG)
1040 return XEXP (op, 0);
1041
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If the comparison is not reversible, use
1044 x ? y : (neg y). */
1045 if (GET_CODE (op) == IF_THEN_ELSE)
1046 {
1047 rtx cond = XEXP (op, 0);
1048 rtx true_rtx = XEXP (op, 1);
1049 rtx false_rtx = XEXP (op, 2);
1050
1051 if ((GET_CODE (true_rtx) == NEG
1052 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1053 || (GET_CODE (false_rtx) == NEG
1054 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1055 {
1056 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1057 temp = reversed_comparison (cond, mode);
1058 else
1059 {
1060 temp = cond;
1061 std::swap (true_rtx, false_rtx);
1062 }
1063 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1064 mode, temp, true_rtx, false_rtx);
1065 }
1066 }
1067
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op) == PLUS
1070 && XEXP (op, 1) == const1_rtx)
1071 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1072
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op) == NOT)
1075 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1076 CONST1_RTX (mode));
1077
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1087
1088 if (GET_CODE (op) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1091 {
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1095 {
1096 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1097 if (temp)
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1099 }
1100
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1103 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1104 }
1105
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1110 {
1111 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1112 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1113 }
1114
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1117 is a constant). */
1118 if (GET_CODE (op) == ASHIFT)
1119 {
1120 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1121 if (temp)
1122 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1123 }
1124
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op, 1))
1129 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1130 return simplify_gen_binary (LSHIFTRT, mode,
1131 XEXP (op, 0), XEXP (op, 1));
1132
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op, 1))
1137 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1138 return simplify_gen_binary (ASHIFTRT, mode,
1139 XEXP (op, 0), XEXP (op, 1));
1140
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
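/* (Check: A == 0 gives -(0 ^ 1) == -1 == 0 - 1; A == 1 gives -(1 ^ 1) == 0
   == 1 - 1.)  */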
1142 if (GET_CODE (op) == XOR
1143 && XEXP (op, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op, 0), mode) == 1)
1145 return plus_constant (mode, XEXP (op, 0), -1);
1146
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op) == LT
1150 && XEXP (op, 1) == const0_rtx
1151 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1152 {
1153 int_mode = as_a <scalar_int_mode> (mode);
1154 int isize = GET_MODE_PRECISION (inner);
1155 if (STORE_FLAG_VALUE == 1)
1156 {
1157 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1158 gen_int_shift_amount (inner,
1159 isize - 1));
1160 if (int_mode == inner)
1161 return temp;
1162 if (GET_MODE_PRECISION (int_mode) > isize)
1163 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1164 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1165 }
1166 else if (STORE_FLAG_VALUE == -1)
1167 {
1168 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1176 }
1177 }
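/* For example, with STORE_FLAG_VALUE == 1, (neg:SI (lt:SI X (const_int 0)))
   becomes (ashiftrt:SI X (const_int 31)): both are -1 when X is negative and
   0 otherwise.  With STORE_FLAG_VALUE == -1 the LSHIFTRT form yields 1 or 0
   instead.  */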
1178
1179 if (vec_series_p (op, &base, &step))
1180 {
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode = GET_MODE_INNER (mode);
1185 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1186 if (base)
1187 {
1188 step = simplify_unary_operation (NEG, inner_mode,
1189 step, inner_mode);
1190 if (step)
1191 return gen_vec_series (mode, base, step);
1192 }
1193 }
1194 break;
1195
1196 case TRUNCATE:
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op) == LSHIFTRT
1200 && GET_CODE (XEXP (op, 0)) == MULT)
1201 break;
1202
1203 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1204 {
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1206 {
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1210 }
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1213 integer mode. */
1214 break;
1215 }
1216
1217 if (GET_MODE (op) != VOIDmode)
1218 {
1219 temp = simplify_truncation (mode, op, GET_MODE (op));
1220 if (temp)
1221 return temp;
1222 }
1223
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (known_eq (GET_MODE_NUNITS (mode), 1)
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1228 || truncated_to_mode (mode, op)))
1229 {
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1233 }
1234
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode)
1240 && COMPARISON_P (op)
1241 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1242 {
1243 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 if (temp)
1245 return temp;
1246 }
1247
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op) == MEM
1251 && !VECTOR_MODE_P (mode)
1252 && !MEM_VOLATILE_P (op)
1253 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1254 {
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1258 }
1259
1260 break;
1261
1262 case FLOAT_TRUNCATE:
1263 if (DECIMAL_FLOAT_MODE_P (mode))
1264 break;
1265
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op, 0)) == mode)
1269 return XEXP (op, 0);
1270
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1274
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1277
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations)
1282 || GET_CODE (op) == FLOAT_EXTEND)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1284 > GET_MODE_UNIT_SIZE (mode)
1285 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1286 mode,
1287 XEXP (op, 0), mode);
1288
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op)))
1293 return simplify_gen_unary (GET_CODE (op), mode,
1294 XEXP (op, 0),
1295 GET_MODE (XEXP (op, 0)));
1296
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op) == ABS
1300 || GET_CODE (op) == NEG)
1301 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1303 return simplify_gen_unary (GET_CODE (op), mode,
1304 XEXP (XEXP (op, 0), 0), mode);
1305
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op) == SUBREG
1309 && subreg_lowpart_p (op)
1310 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1311 return SUBREG_REG (op);
1312 break;
1313
1314 case FLOAT_EXTEND:
1315 if (DECIMAL_FLOAT_MODE_P (mode))
1316 break;
1317
1318 /* (float_extend (float_extend x)) is (float_extend x)
1319
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen.
1322 */
1323 if (GET_CODE (op) == FLOAT_EXTEND
1324 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1325 && exact_int_to_float_conversion_p (op)))
1326 return simplify_gen_unary (GET_CODE (op), mode,
1327 XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1329
1330 break;
1331
1332 case ABS:
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op) == NEG)
1335 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1337
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1339 do nothing. */
1340 if (GET_MODE (op) == VOIDmode)
1341 break;
1342
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op),
1346 nonzero_bits (op, GET_MODE (op))))
1347 return op;
1348
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a <scalar_int_mode> (mode, &int_mode)
1351 && (num_sign_bit_copies (op, int_mode)
1352 == GET_MODE_PRECISION (int_mode)))
1353 return gen_rtx_NEG (int_mode, op);
1354
1355 break;
1356
1357 case FFS:
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op) == SIGN_EXTEND
1360 || GET_CODE (op) == ZERO_EXTEND)
1361 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1362 GET_MODE (XEXP (op, 0)));
1363 break;
1364
1365 case POPCOUNT:
1366 switch (GET_CODE (op))
1367 {
1368 case BSWAP:
1369 case ZERO_EXTEND:
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1373
1374 case ROTATE:
1375 case ROTATERT:
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op, 1)))
1378 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1379 GET_MODE (XEXP (op, 0)));
1380 break;
1381
1382 default:
1383 break;
1384 }
1385 break;
1386
1387 case PARITY:
1388 switch (GET_CODE (op))
1389 {
1390 case NOT:
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 case SIGN_EXTEND:
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1396
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1404
1405 default:
1406 break;
1407 }
1408 break;
1409
1410 case BSWAP:
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op) == BSWAP)
1413 return XEXP (op, 0);
1414 break;
1415
1416 case FLOAT:
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op) == SIGN_EXTEND)
1419 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1420 GET_MODE (XEXP (op, 0)));
1421 break;
1422
1423 case SIGN_EXTEND:
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1427 the VAX). */
1428 if (GET_CODE (op) == TRUNCATE
1429 && GET_MODE (XEXP (op, 0)) == mode
1430 && GET_CODE (XEXP (op, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1433 return XEXP (op, 0);
1434
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op) == MULT)
1438 {
1439 rtx lhs = XEXP (op, 0);
1440 rtx rhs = XEXP (op, 1);
1441 enum rtx_code lcode = GET_CODE (lhs);
1442 enum rtx_code rcode = GET_CODE (rhs);
1443
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode == SIGN_EXTEND
1447 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1448 && (rcode == SIGN_EXTEND
1449 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1450 {
1451 machine_mode lmode = GET_MODE (lhs);
1452 machine_mode rmode = GET_MODE (rhs);
1453 int bits;
1454
1455 if (lcode == ASHIFTRT)
1456 /* Number of bits not shifted off the end. */
1457 bits = (GET_MODE_UNIT_PRECISION (lmode)
1458 - INTVAL (XEXP (lhs, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1462
1463 if (rcode == ASHIFTRT)
1464 bits += (GET_MODE_UNIT_PRECISION (rmode)
1465 - INTVAL (XEXP (rhs, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1468
1469 /* We can only widen multiplies if the result is mathematically
1470 equivalent. I.e. if overflow was impossible. */
1471 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1472 return simplify_gen_binary
1473 (MULT, mode,
1474 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1475 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1476 }
1477 }
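/* For example, (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
   (sign_extend:SI (reg:HI B)))) needs at most 16 + 16 = 32 bits, so it can be
   rewritten as the wider widening multiply
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))).  */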
1478
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op)
1484 && SUBREG_PROMOTED_SIGNED_P (op)
1485 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1486 {
1487 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1488 if (temp)
1489 return temp;
1490 }
1491
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1495 {
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1498 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1499 GET_MODE (XEXP (op, 0)));
1500 }
1501
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1508 && GET_CODE (XEXP (op, 0)) == ASHIFT
1509 && is_a <scalar_int_mode> (mode, &int_mode)
1510 && CONST_INT_P (XEXP (op, 1))
1511 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1512 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1513 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1514 {
1515 scalar_int_mode tmode;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode)
1517 > GET_MODE_BITSIZE (op_mode));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1519 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1520 {
1521 rtx inner =
1522 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1523 if (inner)
1524 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1525 ? SIGN_EXTEND : ZERO_EXTEND,
1526 int_mode, inner, tmode);
1527 }
1528 }
1529
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
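/* (A logical right shift by a nonzero amount clears the most significant bit,
   so sign- and zero-extending the result give the same value.)  */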
1532 if (GET_CODE (op) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op, 1))
1534 && XEXP (op, 1) != const0_rtx)
1535 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1536
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode == Pmode && GET_MODE (op) == ptr_mode
1544 && (CONSTANT_P (op)
1545 || (GET_CODE (op) == SUBREG
1546 && REG_P (SUBREG_REG (op))
1547 && REG_POINTER (SUBREG_REG (op))
1548 && GET_MODE (SUBREG_REG (op)) == Pmode))
1549 && !targetm.have_ptr_extend ())
1550 {
1551 temp
1552 = convert_memory_address_addr_space_1 (Pmode, op,
1553 ADDR_SPACE_GENERIC, false,
1554 true);
1555 if (temp)
1556 return temp;
1557 }
1558 #endif
1559 break;
1560
1561 case ZERO_EXTEND:
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op)
1568 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1569 {
1570 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1571 if (temp)
1572 return temp;
1573 }
1574
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op) == MULT)
1578 {
1579 rtx lhs = XEXP (op, 0);
1580 rtx rhs = XEXP (op, 1);
1581 enum rtx_code lcode = GET_CODE (lhs);
1582 enum rtx_code rcode = GET_CODE (rhs);
1583
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode == ZERO_EXTEND
1587 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1588 && (rcode == ZERO_EXTEND
1589 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1590 {
1591 machine_mode lmode = GET_MODE (lhs);
1592 machine_mode rmode = GET_MODE (rhs);
1593 int bits;
1594
1595 if (lcode == LSHIFTRT)
1596 /* Number of bits not shifted off the end. */
1597 bits = (GET_MODE_UNIT_PRECISION (lmode)
1598 - INTVAL (XEXP (lhs, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1602
1603 if (rcode == LSHIFTRT)
1604 bits += (GET_MODE_UNIT_PRECISION (rmode)
1605 - INTVAL (XEXP (rhs, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1608
1609 /* We can only widen multiplies if the result is mathematically
1610 equivalent. I.e. if overflow was impossible. */
1611 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1612 return simplify_gen_binary
1613 (MULT, mode,
1614 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1615 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1616 }
1617 }
1618
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op) == ZERO_EXTEND)
1621 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1622 GET_MODE (XEXP (op, 0)));
1623
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op) == LSHIFTRT
1628 && GET_CODE (XEXP (op, 0)) == ASHIFT
1629 && is_a <scalar_int_mode> (mode, &int_mode)
1630 && CONST_INT_P (XEXP (op, 1))
1631 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1632 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1633 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1634 {
1635 scalar_int_mode tmode;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1637 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1638 {
1639 rtx inner =
1640 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1641 if (inner)
1642 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1643 inner, tmode);
1644 }
1645 }
1646
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1649 of mode N. E.g.
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op)
1653 && is_a <scalar_int_mode> (mode, &int_mode)
1654 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1655 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1657 && subreg_lowpart_p (op)
1658 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1659 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1660 {
1661 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1662 return SUBREG_REG (op);
1663 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1664 op0_mode);
1665 }
1666
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED > 0
1673 && mode == Pmode && GET_MODE (op) == ptr_mode
1674 && (CONSTANT_P (op)
1675 || (GET_CODE (op) == SUBREG
1676 && REG_P (SUBREG_REG (op))
1677 && REG_POINTER (SUBREG_REG (op))
1678 && GET_MODE (SUBREG_REG (op)) == Pmode))
1679 && !targetm.have_ptr_extend ())
1680 {
1681 temp
1682 = convert_memory_address_addr_space_1 (Pmode, op,
1683 ADDR_SPACE_GENERIC, false,
1684 true);
1685 if (temp)
1686 return temp;
1687 }
1688 #endif
1689 break;
1690
1691 default:
1692 break;
1693 }
1694
1695 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1696 {
1697 /* Try applying the operator to ELT and see if that simplifies.
1698 We can duplicate the result if so.
1699
1700 The reason we don't use simplify_gen_unary is that it isn't
1701 necessarily a win to convert things like:
1702
1703 (neg:V (vec_duplicate:V (reg:S R)))
1704
1705 to:
1706
1707 (vec_duplicate:V (neg:S (reg:S R)))
1708
1709 The first might be done entirely in vector registers while the
1710 second might need a move between register files. */
1711 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1712 elt, GET_MODE_INNER (GET_MODE (op)));
1713 if (temp)
1714 return gen_vec_duplicate (mode, temp);
1715 }
1716
1717 return 0;
1718 }
1719
1720 /* Try to compute the value of a unary operation CODE whose output mode is to
1721 be MODE with input operand OP whose mode was originally OP_MODE.
1722 Return zero if the value cannot be computed. */
1723 rtx
1724 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1725 rtx op, machine_mode op_mode)
1726 {
1727 scalar_int_mode result_mode;
1728
1729 if (code == VEC_DUPLICATE)
1730 {
1731 gcc_assert (VECTOR_MODE_P (mode));
1732 if (GET_MODE (op) != VOIDmode)
1733 {
1734 if (!VECTOR_MODE_P (GET_MODE (op)))
1735 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1736 else
1737 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1738 (GET_MODE (op)));
1739 }
1740 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1741 return gen_const_vec_duplicate (mode, op);
1742 unsigned int n_elts;
1743 if (GET_CODE (op) == CONST_VECTOR
1744 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1745 {
1746 /* This must be constant if we're duplicating it to a constant
1747 number of elements. */
1748 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1749 gcc_assert (in_n_elts < n_elts);
1750 gcc_assert ((n_elts % in_n_elts) == 0);
1751 rtvec v = rtvec_alloc (n_elts);
1752 for (unsigned i = 0; i < n_elts; i++)
1753 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1754 return gen_rtx_CONST_VECTOR (mode, v);
1755 }
1756 }
1757
1758 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1759 {
1760 unsigned int n_elts;
1761 if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
1762 return NULL_RTX;
1763
1764 machine_mode opmode = GET_MODE (op);
1765 gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
1766 gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));
1767
1768 rtvec v = rtvec_alloc (n_elts);
1769 unsigned int i;
1770
1771 for (i = 0; i < n_elts; i++)
1772 {
1773 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1774 CONST_VECTOR_ELT (op, i),
1775 GET_MODE_INNER (opmode));
1776 if (!x || !valid_for_const_vector_p (mode, x))
1777 return 0;
1778 RTVEC_ELT (v, i) = x;
1779 }
1780 return gen_rtx_CONST_VECTOR (mode, v);
1781 }
1782
1783 /* The order of these tests is critical so that, for example, we don't
1784 check the wrong mode (input vs. output) for a conversion operation,
1785 such as FIX. At some point, this should be simplified. */
1786
1787 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1788 {
1789 REAL_VALUE_TYPE d;
1790
1791 if (op_mode == VOIDmode)
1792 {
1793 /* CONST_INTs have VOIDmode as the mode. We assume that all
1794 the bits of the constant are significant, though this is
1795 a dangerous assumption, as CONST_INTs are often
1796 created and used with garbage in the bits outside of the
1797 precision of the implied mode of the const_int. */
1798 op_mode = MAX_MODE_INT;
1799 }
1800
1801 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1802
1803 /* Avoid the folding if flag_signaling_nans is on and
1804 operand is a signaling NaN. */
1805 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1806 return 0;
1807
1808 d = real_value_truncate (mode, d);
1809 return const_double_from_real_value (d, mode);
1810 }
1811 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1812 {
1813 REAL_VALUE_TYPE d;
1814
1815 if (op_mode == VOIDmode)
1816 {
1817 /* CONST_INTs have VOIDmode as the mode. We assume that all
1818 the bits of the constant are significant, though this is
1819 a dangerous assumption, as CONST_INTs are often
1820 created and used with garbage in the bits outside of the
1821 precision of the implied mode of the const_int. */
1822 op_mode = MAX_MODE_INT;
1823 }
1824
1825 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1826
1827 /* Avoid the folding if flag_signaling_nans is on and
1828 operand is a signaling NaN. */
1829 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1830 return 0;
1831
1832 d = real_value_truncate (mode, d);
1833 return const_double_from_real_value (d, mode);
1834 }
1835
1836 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1837 {
1838 unsigned int width = GET_MODE_PRECISION (result_mode);
1839 wide_int result;
1840 scalar_int_mode imode = (op_mode == VOIDmode
1841 ? result_mode
1842 : as_a <scalar_int_mode> (op_mode));
1843 rtx_mode_t op0 = rtx_mode_t (op, imode);
1844 int int_value;
1845
1846 #if TARGET_SUPPORTS_WIDE_INT == 0
1847 /* This assert keeps the simplification from producing a result
1848 that cannot be represented in a CONST_DOUBLE, but a lot of
1849 upstream callers expect that this function never fails to
1850 simplify something, so if you added this to the test
1851 above the code would die later anyway. If this assert
1852 fires, you just need to make the port support wide int. */
1853 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1854 #endif
1855
1856 switch (code)
1857 {
1858 case NOT:
1859 result = wi::bit_not (op0);
1860 break;
1861
1862 case NEG:
1863 result = wi::neg (op0);
1864 break;
1865
1866 case ABS:
1867 result = wi::abs (op0);
1868 break;
1869
1870 case FFS:
1871 result = wi::shwi (wi::ffs (op0), result_mode);
1872 break;
1873
1874 case CLZ:
1875 if (wi::ne_p (op0, 0))
1876 int_value = wi::clz (op0);
1877 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1878 int_value = GET_MODE_PRECISION (imode);
1879 result = wi::shwi (int_value, result_mode);
1880 break;
1881
1882 case CLRSB:
1883 result = wi::shwi (wi::clrsb (op0), result_mode);
1884 break;
1885
1886 case CTZ:
1887 if (wi::ne_p (op0, 0))
1888 int_value = wi::ctz (op0);
1889 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1890 int_value = GET_MODE_PRECISION (imode);
1891 result = wi::shwi (int_value, result_mode);
1892 break;
1893
1894 case POPCOUNT:
1895 result = wi::shwi (wi::popcount (op0), result_mode);
1896 break;
1897
1898 case PARITY:
1899 result = wi::shwi (wi::parity (op0), result_mode);
1900 break;
1901
1902 case BSWAP:
1903 result = wide_int (op0).bswap ();
1904 break;
1905
1906 case TRUNCATE:
1907 case ZERO_EXTEND:
1908 result = wide_int::from (op0, width, UNSIGNED);
1909 break;
1910
1911 case SIGN_EXTEND:
1912 result = wide_int::from (op0, width, SIGNED);
1913 break;
1914
1915 case SQRT:
1916 default:
1917 return 0;
1918 }
1919
1920 return immed_wide_int_const (result, result_mode);
1921 }
1922
1923 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1924 && SCALAR_FLOAT_MODE_P (mode)
1925 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1926 {
1927 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1928 switch (code)
1929 {
1930 case SQRT:
1931 return 0;
1932 case ABS:
1933 d = real_value_abs (&d);
1934 break;
1935 case NEG:
1936 d = real_value_negate (&d);
1937 break;
1938 case FLOAT_TRUNCATE:
1939 /* Don't perform the operation if flag_signaling_nans is on
1940 and the operand is a signaling NaN. */
1941 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1942 return NULL_RTX;
1943 d = real_value_truncate (mode, d);
1944 break;
1945 case FLOAT_EXTEND:
1946 /* Don't perform the operation if flag_signaling_nans is on
1947 and the operand is a signaling NaN. */
1948 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1949 return NULL_RTX;
1950 /* All this does is change the mode, unless changing
1951 mode class. */
1952 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1953 real_convert (&d, mode, &d);
1954 break;
1955 case FIX:
1956 /* Don't perform the operation if flag_signaling_nans is on
1957 and the operand is a signaling NaN. */
1958 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1959 return NULL_RTX;
1960 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1961 break;
1962 case NOT:
1963 {
1964 long tmp[4];
1965 int i;
1966
1967 real_to_target (tmp, &d, GET_MODE (op));
1968 for (i = 0; i < 4; i++)
1969 tmp[i] = ~tmp[i];
1970 real_from_target (&d, tmp, mode);
1971 break;
1972 }
1973 default:
1974 gcc_unreachable ();
1975 }
1976 return const_double_from_real_value (d, mode);
1977 }
1978 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1979 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1980 && is_int_mode (mode, &result_mode))
1981 {
1982 unsigned int width = GET_MODE_PRECISION (result_mode);
1983 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1984 operators are intentionally left unspecified (to ease implementation
1985 by target backends), for consistency, this routine implements the
1986 same semantics for constant folding as used by the middle-end. */
1987
1988 /* This was formerly used only for non-IEEE float.
1989 eggert@twinsun.com says it is safe for IEEE also. */
1990 REAL_VALUE_TYPE t;
1991 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1992 wide_int wmax, wmin;
1993 /* This is part of the ABI of real_to_integer, but we check
1994 things before making this call. */
1995 bool fail;
1996
1997 switch (code)
1998 {
1999 case FIX:
2000 if (REAL_VALUE_ISNAN (*x))
2001 return const0_rtx;
2002
2003 /* Test against the signed upper bound. */
2004 wmax = wi::max_value (width, SIGNED);
2005 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2006 if (real_less (&t, x))
2007 return immed_wide_int_const (wmax, mode);
2008
2009 /* Test against the signed lower bound. */
2010 wmin = wi::min_value (width, SIGNED);
2011 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2012 if (real_less (x, &t))
2013 return immed_wide_int_const (wmin, mode);
2014
2015 return immed_wide_int_const (real_to_integer (x, &fail, width),
2016 mode);
2017
2018 case UNSIGNED_FIX:
2019 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2020 return const0_rtx;
2021
2022 /* Test against the unsigned upper bound. */
2023 wmax = wi::max_value (width, UNSIGNED);
2024 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2025 if (real_less (&t, x))
2026 return immed_wide_int_const (wmax, mode);
2027
2028 return immed_wide_int_const (real_to_integer (x, &fail, width),
2029 mode);
2030
2031 default:
2032 gcc_unreachable ();
2033 }
2034 }
2035
2036 /* Handle polynomial integers. */
2037 else if (CONST_POLY_INT_P (op))
2038 {
2039 poly_wide_int result;
2040 switch (code)
2041 {
2042 case NEG:
2043 result = -const_poly_int_value (op);
2044 break;
2045
2046 case NOT:
2047 result = ~const_poly_int_value (op);
2048 break;
2049
2050 default:
2051 return NULL_RTX;
2052 }
2053 return immed_wide_int_const (result, mode);
2054 }
2055
2056 return NULL_RTX;
2057 }
2058 \f
2059 /* Subroutine of simplify_binary_operation to simplify a binary operation
2060 CODE that can commute with byte swapping, with result mode MODE and
2061 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2062 Return zero if no simplification or canonicalization is possible. */
2063
2064 static rtx
2065 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2066 rtx op0, rtx op1)
2067 {
2068 rtx tem;
2069
2070 /* (op (bswap x) C1) -> (bswap (op x C2)) where C2 is C1 with its bytes swapped. */
2071 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2072 {
2073 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2074 simplify_gen_unary (BSWAP, mode, op1, mode));
2075 return simplify_gen_unary (BSWAP, mode, tem, mode);
2076 }
2077
2078 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2079 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2080 {
2081 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2082 return simplify_gen_unary (BSWAP, mode, tem, mode);
2083 }
2084
2085 return NULL_RTX;
2086 }
2087
2088 /* Subroutine of simplify_binary_operation to simplify a commutative,
2089 associative binary operation CODE with result mode MODE, operating
2090 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2091 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2092 canonicalization is possible. */
2093
2094 static rtx
2095 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2096 rtx op0, rtx op1)
2097 {
2098 rtx tem;
2099
2100 /* Linearize the operator to the left. */
2101 if (GET_CODE (op1) == code)
2102 {
2103 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2104 if (GET_CODE (op0) == code)
2105 {
2106 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2107 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2108 }
2109
2110 /* "a op (b op c)" becomes "(b op c) op a". */
2111 if (! swap_commutative_operands_p (op1, op0))
2112 return simplify_gen_binary (code, mode, op1, op0);
2113
2114 std::swap (op0, op1);
2115 }
2116
2117 if (GET_CODE (op0) == code)
2118 {
2119 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2120 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2121 {
2122 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2123 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2124 }
2125
2126 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2127 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2128 if (tem != 0)
2129 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2130
2131 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2132 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2133 if (tem != 0)
2134 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2135 }
2136
2137 return 0;
2138 }
2139
2140
2141 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2142 and OP1. Return 0 if no simplification is possible.
2143
2144 Don't use this for relational operations such as EQ or LT.
2145 Use simplify_relational_operation instead. */
2146 rtx
2147 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2148 rtx op0, rtx op1)
2149 {
2150 rtx trueop0, trueop1;
2151 rtx tem;
2152
2153 /* Relational operations don't work here. We must know the mode
2154 of the operands in order to do the comparison correctly.
2155 Assuming a full word can give incorrect results.
2156 Consider comparing 128 with -128 in QImode. */
2157 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2158 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2159
2160 /* Make sure the constant is second. */
2161 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2162 && swap_commutative_operands_p (op0, op1))
2163 std::swap (op0, op1);
2164
2165 trueop0 = avoid_constant_pool_reference (op0);
2166 trueop1 = avoid_constant_pool_reference (op1);
2167
2168 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2169 if (tem)
2170 return tem;
2171 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2172
2173 if (tem)
2174 return tem;
2175
2176 /* If the above steps did not result in a simplification and op0 or op1
2177 were constant pool references, use the referenced constants directly. */
2178 if (trueop0 != op0 || trueop1 != op1)
2179 return simplify_gen_binary (code, mode, trueop0, trueop1);
2180
2181 return NULL_RTX;
2182 }
2183
2184 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2185 which OP0 and OP1 are both vector series or vector duplicates
2186 (which are really just series with a step of 0). If so, try to
2187 form a new series by applying CODE to the bases and to the steps.
2188 Return null if no simplification is possible.
2189
2190 MODE is the mode of the operation and is known to be a vector
2191 integer mode. */
2192
2193 static rtx
2194 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2195 rtx op0, rtx op1)
2196 {
2197 rtx base0, step0;
2198 if (vec_duplicate_p (op0, &base0))
2199 step0 = const0_rtx;
2200 else if (!vec_series_p (op0, &base0, &step0))
2201 return NULL_RTX;
2202
2203 rtx base1, step1;
2204 if (vec_duplicate_p (op1, &base1))
2205 step1 = const0_rtx;
2206 else if (!vec_series_p (op1, &base1, &step1))
2207 return NULL_RTX;
2208
2209 /* Only create a new series if we can simplify both parts. In other
2210 cases this isn't really a simplification, and it's not necessarily
2211 a win to replace a vector operation with a scalar operation. */
2212 scalar_mode inner_mode = GET_MODE_INNER (mode);
2213 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2214 if (!new_base)
2215 return NULL_RTX;
2216
2217 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2218 if (!new_step)
2219 return NULL_RTX;
2220
2221 return gen_vec_series (mode, new_base, new_step);
2222 }
2223
2224 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2225 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2226 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2227 actual constants. */
2228
2229 static rtx
2230 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2231 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2232 {
2233 rtx tem, reversed, opleft, opright, elt0, elt1;
2234 HOST_WIDE_INT val;
2235 scalar_int_mode int_mode, inner_mode;
2236 poly_int64 offset;
2237
2238 /* Even if we can't compute a constant result,
2239 there are some cases worth simplifying. */
2240
2241 switch (code)
2242 {
2243 case PLUS:
2244 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2245 when x is NaN, infinite, or finite and nonzero. They aren't
2246 when x is -0 and the rounding mode is not towards -infinity,
2247 since (-0) + 0 is then 0. */
2248 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2249 return op0;
2250
2251 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2252 transformations are safe even for IEEE. */
2253 if (GET_CODE (op0) == NEG)
2254 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2255 else if (GET_CODE (op1) == NEG)
2256 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2257
2258 /* (~a) + 1 -> -a */
2259 if (INTEGRAL_MODE_P (mode)
2260 && GET_CODE (op0) == NOT
2261 && trueop1 == const1_rtx)
2262 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2263
2264 /* Handle both-operands-constant cases. We can only add
2265 CONST_INTs to constants since the sum of relocatable symbols
2266 can't be handled by most assemblers. Don't add CONST_INT
2267 to CONST_INT since overflow won't be computed properly if wider
2268 than HOST_BITS_PER_WIDE_INT. */
2269
2270 if ((GET_CODE (op0) == CONST
2271 || GET_CODE (op0) == SYMBOL_REF
2272 || GET_CODE (op0) == LABEL_REF)
2273 && CONST_INT_P (op1))
2274 return plus_constant (mode, op0, INTVAL (op1));
2275 else if ((GET_CODE (op1) == CONST
2276 || GET_CODE (op1) == SYMBOL_REF
2277 || GET_CODE (op1) == LABEL_REF)
2278 && CONST_INT_P (op0))
2279 return plus_constant (mode, op1, INTVAL (op0));
2280
2281 /* See if this is something like X * C + X or vice versa, or
2282 if the multiplication is written as a shift. If so, we can
2283 distribute and combine the coefficients into a single
2284 multiply or shift. But don't make
2285 something more expensive than we had before. */
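/* For instance, (plus (ashift:SI (reg:SI X) (const_int 2)) (reg:SI X))
   has coefficients 4 and 1, so it can become
   (mult:SI (reg:SI X) (const_int 5)), subject to the cost check below.  */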
2286
2287 if (is_a <scalar_int_mode> (mode, &int_mode))
2288 {
2289 rtx lhs = op0, rhs = op1;
2290
2291 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2292 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2293
2294 if (GET_CODE (lhs) == NEG)
2295 {
2296 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2297 lhs = XEXP (lhs, 0);
2298 }
2299 else if (GET_CODE (lhs) == MULT
2300 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2301 {
2302 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2303 lhs = XEXP (lhs, 0);
2304 }
2305 else if (GET_CODE (lhs) == ASHIFT
2306 && CONST_INT_P (XEXP (lhs, 1))
2307 && INTVAL (XEXP (lhs, 1)) >= 0
2308 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2309 {
2310 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2311 GET_MODE_PRECISION (int_mode));
2312 lhs = XEXP (lhs, 0);
2313 }
2314
2315 if (GET_CODE (rhs) == NEG)
2316 {
2317 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2318 rhs = XEXP (rhs, 0);
2319 }
2320 else if (GET_CODE (rhs) == MULT
2321 && CONST_INT_P (XEXP (rhs, 1)))
2322 {
2323 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2324 rhs = XEXP (rhs, 0);
2325 }
2326 else if (GET_CODE (rhs) == ASHIFT
2327 && CONST_INT_P (XEXP (rhs, 1))
2328 && INTVAL (XEXP (rhs, 1)) >= 0
2329 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2330 {
2331 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2332 GET_MODE_PRECISION (int_mode));
2333 rhs = XEXP (rhs, 0);
2334 }
2335
2336 if (rtx_equal_p (lhs, rhs))
2337 {
2338 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2339 rtx coeff;
2340 bool speed = optimize_function_for_speed_p (cfun);
2341
2342 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2343
2344 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2345 return (set_src_cost (tem, int_mode, speed)
2346 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2347 }
2348 }
2349
2350 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
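/* Illustration in QImode, where the sign bit is 0x80: adding 0x80 only
   flips the top bit, so (plus (xor X C1) 0x80) is the same as
   (xor X (C1 ^ 0x80)).  */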
2351 if (CONST_SCALAR_INT_P (op1)
2352 && GET_CODE (op0) == XOR
2353 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2354 && mode_signbit_p (mode, op1))
2355 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2356 simplify_gen_binary (XOR, mode, op1,
2357 XEXP (op0, 1)));
2358
2359 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2360 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2361 && GET_CODE (op0) == MULT
2362 && GET_CODE (XEXP (op0, 0)) == NEG)
2363 {
2364 rtx in1, in2;
2365
2366 in1 = XEXP (XEXP (op0, 0), 0);
2367 in2 = XEXP (op0, 1);
2368 return simplify_gen_binary (MINUS, mode, op1,
2369 simplify_gen_binary (MULT, mode,
2370 in1, in2));
2371 }
2372
2373 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2374 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2375 is 1. */
2376 if (COMPARISON_P (op0)
2377 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2378 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2379 && (reversed = reversed_comparison (op0, mode)))
2380 return
2381 simplify_gen_unary (NEG, mode, reversed, mode);
2382
2383 /* If one of the operands is a PLUS or a MINUS, see if we can
2384 simplify this by the associative law.
2385 Don't use the associative law for floating point.
2386 The inaccuracy makes it nonassociative,
2387 and subtle programs can break if operations are associated. */
2388
2389 if (INTEGRAL_MODE_P (mode)
2390 && (plus_minus_operand_p (op0)
2391 || plus_minus_operand_p (op1))
2392 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2393 return tem;
2394
2395 /* Reassociate floating point addition only when the user
2396 specifies associative math operations. */
2397 if (FLOAT_MODE_P (mode)
2398 && flag_associative_math)
2399 {
2400 tem = simplify_associative_operation (code, mode, op0, op1);
2401 if (tem)
2402 return tem;
2403 }
2404
2405 /* Handle vector series. */
2406 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2407 {
2408 tem = simplify_binary_operation_series (code, mode, op0, op1);
2409 if (tem)
2410 return tem;
2411 }
2412 break;
2413
2414 case COMPARE:
2415 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2416 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2417 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2418 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2419 {
2420 rtx xop00 = XEXP (op0, 0);
2421 rtx xop10 = XEXP (op1, 0);
2422
2423 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2424 return xop00;
2425
2426 if (REG_P (xop00) && REG_P (xop10)
2427 && REGNO (xop00) == REGNO (xop10)
2428 && GET_MODE (xop00) == mode
2429 && GET_MODE (xop10) == mode
2430 && GET_MODE_CLASS (mode) == MODE_CC)
2431 return xop00;
2432 }
2433 break;
2434
2435 case MINUS:
2436 /* We can't assume x-x is 0 even with non-IEEE floating point,
2437 but since it is zero except in very strange circumstances, we
2438 will treat it as zero with -ffinite-math-only. */
2439 if (rtx_equal_p (trueop0, trueop1)
2440 && ! side_effects_p (op0)
2441 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2442 return CONST0_RTX (mode);
2443
2444 /* Change subtraction from zero into negation. (0 - x) is the
2445 same as -x when x is NaN, infinite, or finite and nonzero.
2446 But if the mode has signed zeros, and does not round towards
2447 -infinity, then 0 - 0 is 0, not -0. */
2448 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2449 return simplify_gen_unary (NEG, mode, op1, mode);
2450
2451 /* (-1 - a) is ~a, unless the expression contains symbolic
2452 constants, in which case not retaining additions and
2453 subtractions could cause invalid assembly to be produced. */
2454 if (trueop0 == constm1_rtx
2455 && !contains_symbolic_reference_p (op1))
2456 return simplify_gen_unary (NOT, mode, op1, mode);
2457
2458 /* Subtracting 0 has no effect unless the mode has signed zeros
2459 and supports rounding towards -infinity. In such a case,
2460 0 - 0 is -0. */
2461 if (!(HONOR_SIGNED_ZEROS (mode)
2462 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2463 && trueop1 == CONST0_RTX (mode))
2464 return op0;
2465
2466 /* See if this is something like X * C - X or vice versa or
2467 if the multiplication is written as a shift. If so, we can
2468 distribute and make a new multiply, shift, or maybe just
2469 have X (if C is 2 in the example above). But don't make
2470 something more expensive than we had before. */
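/* For instance, (minus (mult:SI (reg:SI X) (const_int 3)) (reg:SI X))
   has coefficients 3 and -1, so it can become
   (mult:SI (reg:SI X) (const_int 2)), subject to the cost check below.  */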
2471
2472 if (is_a <scalar_int_mode> (mode, &int_mode))
2473 {
2474 rtx lhs = op0, rhs = op1;
2475
2476 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2477 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2478
2479 if (GET_CODE (lhs) == NEG)
2480 {
2481 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2482 lhs = XEXP (lhs, 0);
2483 }
2484 else if (GET_CODE (lhs) == MULT
2485 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2486 {
2487 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2488 lhs = XEXP (lhs, 0);
2489 }
2490 else if (GET_CODE (lhs) == ASHIFT
2491 && CONST_INT_P (XEXP (lhs, 1))
2492 && INTVAL (XEXP (lhs, 1)) >= 0
2493 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2494 {
2495 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2496 GET_MODE_PRECISION (int_mode));
2497 lhs = XEXP (lhs, 0);
2498 }
2499
2500 if (GET_CODE (rhs) == NEG)
2501 {
2502 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2503 rhs = XEXP (rhs, 0);
2504 }
2505 else if (GET_CODE (rhs) == MULT
2506 && CONST_INT_P (XEXP (rhs, 1)))
2507 {
2508 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2509 rhs = XEXP (rhs, 0);
2510 }
2511 else if (GET_CODE (rhs) == ASHIFT
2512 && CONST_INT_P (XEXP (rhs, 1))
2513 && INTVAL (XEXP (rhs, 1)) >= 0
2514 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2515 {
2516 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2517 GET_MODE_PRECISION (int_mode));
2518 negcoeff1 = -negcoeff1;
2519 rhs = XEXP (rhs, 0);
2520 }
2521
2522 if (rtx_equal_p (lhs, rhs))
2523 {
2524 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2525 rtx coeff;
2526 bool speed = optimize_function_for_speed_p (cfun);
2527
2528 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2529
2530 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2531 return (set_src_cost (tem, int_mode, speed)
2532 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2533 }
2534 }
2535
2536 /* (a - (-b)) -> (a + b). True even for IEEE. */
2537 if (GET_CODE (op1) == NEG)
2538 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2539
2540 /* (-x - c) may be simplified as (-c - x). */
2541 if (GET_CODE (op0) == NEG
2542 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2543 {
2544 tem = simplify_unary_operation (NEG, mode, op1, mode);
2545 if (tem)
2546 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2547 }
2548
2549 if ((GET_CODE (op0) == CONST
2550 || GET_CODE (op0) == SYMBOL_REF
2551 || GET_CODE (op0) == LABEL_REF)
2552 && poly_int_rtx_p (op1, &offset))
2553 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2554
2555 /* Don't let a relocatable value get a negative coeff. */
2556 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2557 return simplify_gen_binary (PLUS, mode,
2558 op0,
2559 neg_const_int (mode, op1));
2560
2561 /* (x - (x & y)) -> (x & ~y) */
2562 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2563 {
2564 if (rtx_equal_p (op0, XEXP (op1, 0)))
2565 {
2566 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2567 GET_MODE (XEXP (op1, 1)));
2568 return simplify_gen_binary (AND, mode, op0, tem);
2569 }
2570 if (rtx_equal_p (op0, XEXP (op1, 1)))
2571 {
2572 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2573 GET_MODE (XEXP (op1, 0)));
2574 return simplify_gen_binary (AND, mode, op0, tem);
2575 }
2576 }
2577
2578 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2579 by reversing the comparison code if valid. */
2580 if (STORE_FLAG_VALUE == 1
2581 && trueop0 == const1_rtx
2582 && COMPARISON_P (op1)
2583 && (reversed = reversed_comparison (op1, mode)))
2584 return reversed;
2585
2586 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2587 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2588 && GET_CODE (op1) == MULT
2589 && GET_CODE (XEXP (op1, 0)) == NEG)
2590 {
2591 rtx in1, in2;
2592
2593 in1 = XEXP (XEXP (op1, 0), 0);
2594 in2 = XEXP (op1, 1);
2595 return simplify_gen_binary (PLUS, mode,
2596 simplify_gen_binary (MULT, mode,
2597 in1, in2),
2598 op0);
2599 }
2600
2601 /* Canonicalize (minus (neg A) (mult B C)) to
2602 (minus (mult (neg B) C) A). */
2603 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2604 && GET_CODE (op1) == MULT
2605 && GET_CODE (op0) == NEG)
2606 {
2607 rtx in1, in2;
2608
2609 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2610 in2 = XEXP (op1, 1);
2611 return simplify_gen_binary (MINUS, mode,
2612 simplify_gen_binary (MULT, mode,
2613 in1, in2),
2614 XEXP (op0, 0));
2615 }
2616
2617 /* If one of the operands is a PLUS or a MINUS, see if we can
2618 simplify this by the associative law. This will, for example,
2619 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2620 Don't use the associative law for floating point.
2621 The inaccuracy makes it nonassociative,
2622 and subtle programs can break if operations are associated. */
2623
2624 if (INTEGRAL_MODE_P (mode)
2625 && (plus_minus_operand_p (op0)
2626 || plus_minus_operand_p (op1))
2627 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2628 return tem;
2629
2630 /* Handle vector series. */
2631 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2632 {
2633 tem = simplify_binary_operation_series (code, mode, op0, op1);
2634 if (tem)
2635 return tem;
2636 }
2637 break;
2638
2639 case MULT:
2640 if (trueop1 == constm1_rtx)
2641 return simplify_gen_unary (NEG, mode, op0, mode);
2642
2643 if (GET_CODE (op0) == NEG)
2644 {
2645 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2646 /* If op1 is a MULT as well and simplify_unary_operation
2647 just moved the NEG to the second operand, simplify_gen_binary
2648 below could, through simplify_associative_operation, move
2649 the NEG around again and recurse endlessly. */
2650 if (temp
2651 && GET_CODE (op1) == MULT
2652 && GET_CODE (temp) == MULT
2653 && XEXP (op1, 0) == XEXP (temp, 0)
2654 && GET_CODE (XEXP (temp, 1)) == NEG
2655 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2656 temp = NULL_RTX;
2657 if (temp)
2658 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2659 }
2660 if (GET_CODE (op1) == NEG)
2661 {
2662 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2663 /* If op0 is a MULT as well and simplify_unary_operation
2664 just moved the NEG to the second operand, simplify_gen_binary
2665 below could, through simplify_associative_operation, move
2666 the NEG around again and recurse endlessly. */
2667 if (temp
2668 && GET_CODE (op0) == MULT
2669 && GET_CODE (temp) == MULT
2670 && XEXP (op0, 0) == XEXP (temp, 0)
2671 && GET_CODE (XEXP (temp, 1)) == NEG
2672 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2673 temp = NULL_RTX;
2674 if (temp)
2675 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2676 }
2677
2678 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2679 x is NaN, since x * 0 is then also NaN. Nor is it valid
2680 when the mode has signed zeros, since multiplying a negative
2681 number by 0 will give -0, not 0. */
2682 if (!HONOR_NANS (mode)
2683 && !HONOR_SIGNED_ZEROS (mode)
2684 && trueop1 == CONST0_RTX (mode)
2685 && ! side_effects_p (op0))
2686 return op1;
2687
2688 /* In IEEE floating point, x*1 is not equivalent to x for
2689 signaling NaNs. */
2690 if (!HONOR_SNANS (mode)
2691 && trueop1 == CONST1_RTX (mode))
2692 return op0;
2693
2694 /* Convert multiply by constant power of two into shift. */
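/* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */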
2695 if (CONST_SCALAR_INT_P (trueop1))
2696 {
2697 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2698 if (val >= 0)
2699 return simplify_gen_binary (ASHIFT, mode, op0,
2700 gen_int_shift_amount (mode, val));
2701 }
2702
2703 /* x*2 is x+x and x*(-1) is -x */
2704 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2705 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2706 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2707 && GET_MODE (op0) == mode)
2708 {
2709 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2710
2711 if (real_equal (d1, &dconst2))
2712 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2713
2714 if (!HONOR_SNANS (mode)
2715 && real_equal (d1, &dconstm1))
2716 return simplify_gen_unary (NEG, mode, op0, mode);
2717 }
2718
2719 /* Optimize -x * -x as x * x. */
2720 if (FLOAT_MODE_P (mode)
2721 && GET_CODE (op0) == NEG
2722 && GET_CODE (op1) == NEG
2723 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2724 && !side_effects_p (XEXP (op0, 0)))
2725 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2726
2727 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2728 if (SCALAR_FLOAT_MODE_P (mode)
2729 && GET_CODE (op0) == ABS
2730 && GET_CODE (op1) == ABS
2731 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2732 && !side_effects_p (XEXP (op0, 0)))
2733 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2734
2735 /* Reassociate multiplication, but for floating point MULTs
2736 only when the user specifies unsafe math optimizations. */
2737 if (! FLOAT_MODE_P (mode)
2738 || flag_unsafe_math_optimizations)
2739 {
2740 tem = simplify_associative_operation (code, mode, op0, op1);
2741 if (tem)
2742 return tem;
2743 }
2744 break;
2745
2746 case IOR:
2747 if (trueop1 == CONST0_RTX (mode))
2748 return op0;
2749 if (INTEGRAL_MODE_P (mode)
2750 && trueop1 == CONSTM1_RTX (mode)
2751 && !side_effects_p (op0))
2752 return op1;
2753 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2754 return op0;
2755 /* A | (~A) -> -1 */
2756 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2757 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2758 && ! side_effects_p (op0)
2759 && SCALAR_INT_MODE_P (mode))
2760 return constm1_rtx;
2761
2762 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2763 if (CONST_INT_P (op1)
2764 && HWI_COMPUTABLE_MODE_P (mode)
2765 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2766 && !side_effects_p (op0))
2767 return op1;
2768
2769 /* Canonicalize (X & C1) | C2. */
2770 if (GET_CODE (op0) == AND
2771 && CONST_INT_P (trueop1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2773 {
2774 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2775 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2776 HOST_WIDE_INT c2 = INTVAL (trueop1);
2777
2778 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2779 if ((c1 & c2) == c1
2780 && !side_effects_p (XEXP (op0, 0)))
2781 return trueop1;
2782
2783 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2784 if (((c1|c2) & mask) == mask)
2785 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2786 }
2787
2788 /* Convert (A & B) | A to A. */
2789 if (GET_CODE (op0) == AND
2790 && (rtx_equal_p (XEXP (op0, 0), op1)
2791 || rtx_equal_p (XEXP (op0, 1), op1))
2792 && ! side_effects_p (XEXP (op0, 0))
2793 && ! side_effects_p (XEXP (op0, 1)))
2794 return op1;
2795
2796 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2797 mode size to (rotate A CX). */
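/* For instance, in SImode:
   (ior (ashift A (const_int 8)) (lshiftrt A (const_int 24)))
   becomes (rotate A (const_int 8)).  */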
2798
2799 if (GET_CODE (op1) == ASHIFT
2800 || GET_CODE (op1) == SUBREG)
2801 {
2802 opleft = op1;
2803 opright = op0;
2804 }
2805 else
2806 {
2807 opright = op1;
2808 opleft = op0;
2809 }
2810
2811 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2812 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2813 && CONST_INT_P (XEXP (opleft, 1))
2814 && CONST_INT_P (XEXP (opright, 1))
2815 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2816 == GET_MODE_UNIT_PRECISION (mode)))
2817 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2818
2819 /* Same, but for ashift that has been "simplified" to a wider mode
2820 by simplify_shift_const. */
2821
2822 if (GET_CODE (opleft) == SUBREG
2823 && is_a <scalar_int_mode> (mode, &int_mode)
2824 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2825 &inner_mode)
2826 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2827 && GET_CODE (opright) == LSHIFTRT
2828 && GET_CODE (XEXP (opright, 0)) == SUBREG
2829 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2830 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2831 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2832 SUBREG_REG (XEXP (opright, 0)))
2833 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2834 && CONST_INT_P (XEXP (opright, 1))
2835 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2836 + INTVAL (XEXP (opright, 1))
2837 == GET_MODE_PRECISION (int_mode)))
2838 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2839 XEXP (SUBREG_REG (opleft), 1));
2840
2841 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2842 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2843 the PLUS does not affect any of the bits in OP1, we can do
2844 the IOR as a PLUS and we can associate. This is valid if OP1
2845 can be safely shifted left C bits. */
2846 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2847 && GET_CODE (XEXP (op0, 0)) == PLUS
2848 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2849 && CONST_INT_P (XEXP (op0, 1))
2850 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2851 {
2852 int count = INTVAL (XEXP (op0, 1));
2853 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2854
2855 if (mask >> count == INTVAL (trueop1)
2856 && trunc_int_for_mode (mask, mode) == mask
2857 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2858 return simplify_gen_binary (ASHIFTRT, mode,
2859 plus_constant (mode, XEXP (op0, 0),
2860 mask),
2861 XEXP (op0, 1));
2862 }
2863
2864 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2865 if (tem)
2866 return tem;
2867
2868 tem = simplify_associative_operation (code, mode, op0, op1);
2869 if (tem)
2870 return tem;
2871 break;
2872
2873 case XOR:
2874 if (trueop1 == CONST0_RTX (mode))
2875 return op0;
2876 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2877 return simplify_gen_unary (NOT, mode, op0, mode);
2878 if (rtx_equal_p (trueop0, trueop1)
2879 && ! side_effects_p (op0)
2880 && GET_MODE_CLASS (mode) != MODE_CC)
2881 return CONST0_RTX (mode);
2882
2883 /* Canonicalize XOR of the most significant bit to PLUS. */
2884 if (CONST_SCALAR_INT_P (op1)
2885 && mode_signbit_p (mode, op1))
2886 return simplify_gen_binary (PLUS, mode, op0, op1);
2887 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2888 if (CONST_SCALAR_INT_P (op1)
2889 && GET_CODE (op0) == PLUS
2890 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2891 && mode_signbit_p (mode, XEXP (op0, 1)))
2892 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2893 simplify_gen_binary (XOR, mode, op1,
2894 XEXP (op0, 1)));
2895
2896 /* If we are XORing two things that have no bits in common,
2897 convert them into an IOR. This helps to detect rotation encoded
2898 using those methods and possibly other simplifications. */
2899
2900 if (HWI_COMPUTABLE_MODE_P (mode)
2901 && (nonzero_bits (op0, mode)
2902 & nonzero_bits (op1, mode)) == 0)
2903 return (simplify_gen_binary (IOR, mode, op0, op1));
2904
2905 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2906 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2907 (NOT y). */
2908 {
2909 int num_negated = 0;
2910
2911 if (GET_CODE (op0) == NOT)
2912 num_negated++, op0 = XEXP (op0, 0);
2913 if (GET_CODE (op1) == NOT)
2914 num_negated++, op1 = XEXP (op1, 0);
2915
2916 if (num_negated == 2)
2917 return simplify_gen_binary (XOR, mode, op0, op1);
2918 else if (num_negated == 1)
2919 return simplify_gen_unary (NOT, mode,
2920 simplify_gen_binary (XOR, mode, op0, op1),
2921 mode);
2922 }
2923
2924 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2925 correspond to a machine insn or result in further simplifications
2926 if B is a constant. */
2927
2928 if (GET_CODE (op0) == AND
2929 && rtx_equal_p (XEXP (op0, 1), op1)
2930 && ! side_effects_p (op1))
2931 return simplify_gen_binary (AND, mode,
2932 simplify_gen_unary (NOT, mode,
2933 XEXP (op0, 0), mode),
2934 op1);
2935
2936 else if (GET_CODE (op0) == AND
2937 && rtx_equal_p (XEXP (op0, 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode,
2940 simplify_gen_unary (NOT, mode,
2941 XEXP (op0, 1), mode),
2942 op1);
2943
2944 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2945 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2946 out bits inverted twice and not set by C. Similarly, given
2947 (xor (and (xor A B) C) D), simplify without inverting C in
2948 the xor operand: (xor (and A C) (B&C)^D).
2949 */
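/* For example, with B == 0x0f, C == 0xff and D == 0x33:
   (xor (ior (xor A (const_int 0x0f)) (const_int 0xff)) (const_int 0x33))
   becomes (xor (ior A (const_int 0xff)) (const_int 0x33)),
   since (B & ~C) is zero here.  */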
2950 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2951 && GET_CODE (XEXP (op0, 0)) == XOR
2952 && CONST_INT_P (op1)
2953 && CONST_INT_P (XEXP (op0, 1))
2954 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2955 {
2956 enum rtx_code op = GET_CODE (op0);
2957 rtx a = XEXP (XEXP (op0, 0), 0);
2958 rtx b = XEXP (XEXP (op0, 0), 1);
2959 rtx c = XEXP (op0, 1);
2960 rtx d = op1;
2961 HOST_WIDE_INT bval = INTVAL (b);
2962 HOST_WIDE_INT cval = INTVAL (c);
2963 HOST_WIDE_INT dval = INTVAL (d);
2964 HOST_WIDE_INT xcval;
2965
2966 if (op == IOR)
2967 xcval = ~cval;
2968 else
2969 xcval = cval;
2970
2971 return simplify_gen_binary (XOR, mode,
2972 simplify_gen_binary (op, mode, a, c),
2973 gen_int_mode ((bval & xcval) ^ dval,
2974 mode));
2975 }
2976
2977 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2978 we can transform like this:
2979 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2980 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2981 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2982 Attempt a few simplifications when B and C are both constants. */
2983 if (GET_CODE (op0) == AND
2984 && CONST_INT_P (op1)
2985 && CONST_INT_P (XEXP (op0, 1)))
2986 {
2987 rtx a = XEXP (op0, 0);
2988 rtx b = XEXP (op0, 1);
2989 rtx c = op1;
2990 HOST_WIDE_INT bval = INTVAL (b);
2991 HOST_WIDE_INT cval = INTVAL (c);
2992
2993 /* Instead of computing ~A&C, we compute its negated value,
2994 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2995 optimize for sure. If it does not simplify, we still try
2996 to compute ~A&C below, but since that always allocates
2997 RTL, we don't try that before committing to returning a
2998 simplified expression. */
2999 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3000 GEN_INT (~cval));
3001
3002 if ((~cval & bval) == 0)
3003 {
3004 rtx na_c = NULL_RTX;
3005 if (n_na_c)
3006 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3007 else
3008 {
3009 /* If ~A does not simplify, don't bother: we don't
3010 want to simplify 2 operations into 3, and if na_c
3011 were to simplify with na, n_na_c would have
3012 simplified as well. */
3013 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3014 if (na)
3015 na_c = simplify_gen_binary (AND, mode, na, c);
3016 }
3017
3018 /* Try to simplify ~A&C | ~B&C. */
3019 if (na_c != NULL_RTX)
3020 return simplify_gen_binary (IOR, mode, na_c,
3021 gen_int_mode (~bval & cval, mode));
3022 }
3023 else
3024 {
3025 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3026 if (n_na_c == CONSTM1_RTX (mode))
3027 {
3028 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3029 gen_int_mode (~cval & bval,
3030 mode));
3031 return simplify_gen_binary (IOR, mode, a_nc_b,
3032 gen_int_mode (~bval & cval,
3033 mode));
3034 }
3035 }
3036 }
3037
3038 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
3039 do (ior (and A ~C) (and B C)), which matches a machine instruction on
3040 some machines and also has a shorter instruction path length. */
3041 if (GET_CODE (op0) == AND
3042 && GET_CODE (XEXP (op0, 0)) == XOR
3043 && CONST_INT_P (XEXP (op0, 1))
3044 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3045 {
3046 rtx a = trueop1;
3047 rtx b = XEXP (XEXP (op0, 0), 1);
3048 rtx c = XEXP (op0, 1);
3049 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3050 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3051 rtx bc = simplify_gen_binary (AND, mode, b, c);
3052 return simplify_gen_binary (IOR, mode, a_nc, bc);
3053 }
3054 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3055 else if (GET_CODE (op0) == AND
3056 && GET_CODE (XEXP (op0, 0)) == XOR
3057 && CONST_INT_P (XEXP (op0, 1))
3058 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3059 {
3060 rtx a = XEXP (XEXP (op0, 0), 0);
3061 rtx b = trueop1;
3062 rtx c = XEXP (op0, 1);
3063 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3064 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3065 rtx ac = simplify_gen_binary (AND, mode, a, c);
3066 return simplify_gen_binary (IOR, mode, ac, b_nc);
3067 }
3068
3069 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3070 comparison if STORE_FLAG_VALUE is 1. */
3071 if (STORE_FLAG_VALUE == 1
3072 && trueop1 == const1_rtx
3073 && COMPARISON_P (op0)
3074 && (reversed = reversed_comparison (op0, mode)))
3075 return reversed;
3076
3077 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3078 is (lt foo (const_int 0)), so we can perform the above
3079 simplification if STORE_FLAG_VALUE is 1. */
3080
3081 if (is_a <scalar_int_mode> (mode, &int_mode)
3082 && STORE_FLAG_VALUE == 1
3083 && trueop1 == const1_rtx
3084 && GET_CODE (op0) == LSHIFTRT
3085 && CONST_INT_P (XEXP (op0, 1))
3086 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3087 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3088
3089 /* (xor (comparison foo bar) (const_int sign-bit))
3090 when STORE_FLAG_VALUE is the sign bit. */
3091 if (is_a <scalar_int_mode> (mode, &int_mode)
3092 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3093 && trueop1 == const_true_rtx
3094 && COMPARISON_P (op0)
3095 && (reversed = reversed_comparison (op0, int_mode)))
3096 return reversed;
3097
3098 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3099 if (tem)
3100 return tem;
3101
3102 tem = simplify_associative_operation (code, mode, op0, op1);
3103 if (tem)
3104 return tem;
3105 break;
3106
3107 case AND:
3108 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3109 return trueop1;
3110 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3111 return op0;
3112 if (HWI_COMPUTABLE_MODE_P (mode))
3113 {
3114 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3115 HOST_WIDE_INT nzop1;
3116 if (CONST_INT_P (trueop1))
3117 {
3118 HOST_WIDE_INT val1 = INTVAL (trueop1);
3119 /* If we are turning off bits already known off in OP0, we need
3120 not do an AND. */
3121 if ((nzop0 & ~val1) == 0)
3122 return op0;
3123 }
3124 nzop1 = nonzero_bits (trueop1, mode);
3125 /* If we are clearing all the nonzero bits, the result is zero. */
3126 if ((nzop1 & nzop0) == 0
3127 && !side_effects_p (op0) && !side_effects_p (op1))
3128 return CONST0_RTX (mode);
3129 }
3130 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3131 && GET_MODE_CLASS (mode) != MODE_CC)
3132 return op0;
3133 /* A & (~A) -> 0 */
3134 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3135 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3136 && ! side_effects_p (op0)
3137 && GET_MODE_CLASS (mode) != MODE_CC)
3138 return CONST0_RTX (mode);
3139
3140 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3141 there are no nonzero bits of C outside of X's mode. */
3142 if ((GET_CODE (op0) == SIGN_EXTEND
3143 || GET_CODE (op0) == ZERO_EXTEND)
3144 && CONST_INT_P (trueop1)
3145 && HWI_COMPUTABLE_MODE_P (mode)
3146 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3147 & UINTVAL (trueop1)) == 0)
3148 {
3149 machine_mode imode = GET_MODE (XEXP (op0, 0));
3150 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3151 gen_int_mode (INTVAL (trueop1),
3152 imode));
3153 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3154 }
3155
3156 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3157 we might be able to further simplify the AND with X and potentially
3158 remove the truncation altogether. */
3159 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3160 {
3161 rtx x = XEXP (op0, 0);
3162 machine_mode xmode = GET_MODE (x);
3163 tem = simplify_gen_binary (AND, xmode, x,
3164 gen_int_mode (INTVAL (trueop1), xmode));
3165 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3166 }
3167
3168 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3169 if (GET_CODE (op0) == IOR
3170 && CONST_INT_P (trueop1)
3171 && CONST_INT_P (XEXP (op0, 1)))
3172 {
3173 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3174 return simplify_gen_binary (IOR, mode,
3175 simplify_gen_binary (AND, mode,
3176 XEXP (op0, 0), op1),
3177 gen_int_mode (tmp, mode));
3178 }
3179
3180 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3181 insn (and may simplify more). */
3182 if (GET_CODE (op0) == XOR
3183 && rtx_equal_p (XEXP (op0, 0), op1)
3184 && ! side_effects_p (op1))
3185 return simplify_gen_binary (AND, mode,
3186 simplify_gen_unary (NOT, mode,
3187 XEXP (op0, 1), mode),
3188 op1);
3189
3190 if (GET_CODE (op0) == XOR
3191 && rtx_equal_p (XEXP (op0, 1), op1)
3192 && ! side_effects_p (op1))
3193 return simplify_gen_binary (AND, mode,
3194 simplify_gen_unary (NOT, mode,
3195 XEXP (op0, 0), mode),
3196 op1);
3197
3198 /* Similarly for (~(A ^ B)) & A. */
3199 if (GET_CODE (op0) == NOT
3200 && GET_CODE (XEXP (op0, 0)) == XOR
3201 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3202 && ! side_effects_p (op1))
3203 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3204
3205 if (GET_CODE (op0) == NOT
3206 && GET_CODE (XEXP (op0, 0)) == XOR
3207 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3208 && ! side_effects_p (op1))
3209 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3210
3211 /* Convert (A | B) & A to A. */
3212 if (GET_CODE (op0) == IOR
3213 && (rtx_equal_p (XEXP (op0, 0), op1)
3214 || rtx_equal_p (XEXP (op0, 1), op1))
3215 && ! side_effects_p (XEXP (op0, 0))
3216 && ! side_effects_p (XEXP (op0, 1)))
3217 return op1;
3218
3219 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3220 ((A & N) + B) & M -> (A + B) & M
3221 Similarly if (N & M) == 0,
3222 ((A | N) + B) & M -> (A + B) & M
3223 and for - instead of + and/or ^ instead of |.
3224 Also, if (N & M) == 0, then
3225 (A +- N) & M -> A & M. */
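/* Concretely, with M == 0xff and N == 0xf00, so that (N & M) == 0:
   (and (plus (ior A (const_int 0xf00)) B) (const_int 0xff))
   becomes (and (plus A B) (const_int 0xff)), because bits set only
   above the mask cannot affect the masked result of the addition.  */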
3226 if (CONST_INT_P (trueop1)
3227 && HWI_COMPUTABLE_MODE_P (mode)
3228 && ~UINTVAL (trueop1)
3229 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3230 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3231 {
3232 rtx pmop[2];
3233 int which;
3234
3235 pmop[0] = XEXP (op0, 0);
3236 pmop[1] = XEXP (op0, 1);
3237
3238 if (CONST_INT_P (pmop[1])
3239 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3240 return simplify_gen_binary (AND, mode, pmop[0], op1);
3241
3242 for (which = 0; which < 2; which++)
3243 {
3244 tem = pmop[which];
3245 switch (GET_CODE (tem))
3246 {
3247 case AND:
3248 if (CONST_INT_P (XEXP (tem, 1))
3249 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3250 == UINTVAL (trueop1))
3251 pmop[which] = XEXP (tem, 0);
3252 break;
3253 case IOR:
3254 case XOR:
3255 if (CONST_INT_P (XEXP (tem, 1))
3256 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3257 pmop[which] = XEXP (tem, 0);
3258 break;
3259 default:
3260 break;
3261 }
3262 }
3263
3264 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3265 {
3266 tem = simplify_gen_binary (GET_CODE (op0), mode,
3267 pmop[0], pmop[1]);
3268 return simplify_gen_binary (code, mode, tem, op1);
3269 }
3270 }
3271
3272 /* (and X (ior (not X) Y)) -> (and X Y) */
3273 if (GET_CODE (op1) == IOR
3274 && GET_CODE (XEXP (op1, 0)) == NOT
3275 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3276 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3277
3278 /* (and (ior (not X) Y) X) -> (and X Y) */
3279 if (GET_CODE (op0) == IOR
3280 && GET_CODE (XEXP (op0, 0)) == NOT
3281 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3282 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3283
3284 /* (and X (ior Y (not X))) -> (and X Y) */
3285 if (GET_CODE (op1) == IOR
3286 && GET_CODE (XEXP (op1, 1)) == NOT
3287 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3288 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3289
3290 /* (and (ior Y (not X)) X) -> (and X Y) */
3291 if (GET_CODE (op0) == IOR
3292 && GET_CODE (XEXP (op0, 1)) == NOT
3293 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3294 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3295
3296 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3297 if (tem)
3298 return tem;
3299
3300 tem = simplify_associative_operation (code, mode, op0, op1);
3301 if (tem)
3302 return tem;
3303 break;
3304
3305 case UDIV:
3306 /* 0/x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0 == CONST0_RTX (mode)
3308 && !cfun->can_throw_non_call_exceptions)
3309 {
3310 if (side_effects_p (op1))
3311 return simplify_gen_binary (AND, mode, op1, trueop0);
3312 return trueop0;
3313 }
3314 /* x/1 is x. */
3315 if (trueop1 == CONST1_RTX (mode))
3316 {
3317 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3318 if (tem)
3319 return tem;
3320 }
3321 /* Convert divide by power of two into shift. */
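/* E.g. (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */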
3322 if (CONST_INT_P (trueop1)
3323 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3324 return simplify_gen_binary (LSHIFTRT, mode, op0,
3325 gen_int_shift_amount (mode, val));
3326 break;
3327
3328 case DIV:
3329 /* Handle floating point and integers separately. */
3330 if (SCALAR_FLOAT_MODE_P (mode))
3331 {
3332 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3333 safe for modes with NaNs, since 0.0 / 0.0 will then be
3334 NaN rather than 0.0. Nor is it safe for modes with signed
3335 zeros, since dividing 0 by a negative number gives -0.0. */
3336 if (trueop0 == CONST0_RTX (mode)
3337 && !HONOR_NANS (mode)
3338 && !HONOR_SIGNED_ZEROS (mode)
3339 && ! side_effects_p (op1))
3340 return op0;
3341 /* x/1.0 is x. */
3342 if (trueop1 == CONST1_RTX (mode)
3343 && !HONOR_SNANS (mode))
3344 return op0;
3345
3346 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3347 && trueop1 != CONST0_RTX (mode))
3348 {
3349 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3350
3351 /* x/-1.0 is -x. */
3352 if (real_equal (d1, &dconstm1)
3353 && !HONOR_SNANS (mode))
3354 return simplify_gen_unary (NEG, mode, op0, mode);
3355
3356 /* Change FP division by a constant into multiplication.
3357 Only do this with -freciprocal-math. */
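/* E.g. x / 4.0 becomes x * 0.25 under -freciprocal-math.  */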
3358 if (flag_reciprocal_math
3359 && !real_equal (d1, &dconst0))
3360 {
3361 REAL_VALUE_TYPE d;
3362 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3363 tem = const_double_from_real_value (d, mode);
3364 return simplify_gen_binary (MULT, mode, op0, tem);
3365 }
3366 }
3367 }
3368 else if (SCALAR_INT_MODE_P (mode))
3369 {
3370 /* 0/x is 0 (or x&0 if x has side-effects). */
3371 if (trueop0 == CONST0_RTX (mode)
3372 && !cfun->can_throw_non_call_exceptions)
3373 {
3374 if (side_effects_p (op1))
3375 return simplify_gen_binary (AND, mode, op1, trueop0);
3376 return trueop0;
3377 }
3378 /* x/1 is x. */
3379 if (trueop1 == CONST1_RTX (mode))
3380 {
3381 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3382 if (tem)
3383 return tem;
3384 }
3385 /* x/-1 is -x. */
3386 if (trueop1 == constm1_rtx)
3387 {
3388 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3389 if (x)
3390 return simplify_gen_unary (NEG, mode, x, mode);
3391 }
3392 }
3393 break;
3394
3395 case UMOD:
3396 /* 0%x is 0 (or x&0 if x has side-effects). */
3397 if (trueop0 == CONST0_RTX (mode))
3398 {
3399 if (side_effects_p (op1))
3400 return simplify_gen_binary (AND, mode, op1, trueop0);
3401 return trueop0;
3402 }
3403 /* x%1 is 0 (or x&0 if x has side-effects). */
3404 if (trueop1 == CONST1_RTX (mode))
3405 {
3406 if (side_effects_p (op0))
3407 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3408 return CONST0_RTX (mode);
3409 }
3410 /* Implement modulus by power of two as AND. */
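/* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)).  */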
3411 if (CONST_INT_P (trueop1)
3412 && exact_log2 (UINTVAL (trueop1)) > 0)
3413 return simplify_gen_binary (AND, mode, op0,
3414 gen_int_mode (INTVAL (op1) - 1, mode));
3415 break;
3416
3417 case MOD:
3418 /* 0%x is 0 (or x&0 if x has side-effects). */
3419 if (trueop0 == CONST0_RTX (mode))
3420 {
3421 if (side_effects_p (op1))
3422 return simplify_gen_binary (AND, mode, op1, trueop0);
3423 return trueop0;
3424 }
3425 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3426 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3427 {
3428 if (side_effects_p (op0))
3429 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3430 return CONST0_RTX (mode);
3431 }
3432 break;
3433
3434 case ROTATERT:
3435 case ROTATE:
3436 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3437 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3438 bitsize - 1, use the other direction of rotate with an amount of
3439 1 .. bitsize / 2 - 1 instead. */
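/* E.g. for SImode (an illustrative case), (rotate:SI X (const_int 25))
   is canonicalized here to (rotatert:SI X (const_int 7)),
   since 32 - 25 == 7.  */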
3440 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3441 if (CONST_INT_P (trueop1)
3442 && IN_RANGE (INTVAL (trueop1),
3443 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3444 GET_MODE_UNIT_PRECISION (mode) - 1))
3445 {
3446 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3447 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3448 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3449 mode, op0, new_amount_rtx);
3450 }
3451 #endif
3452 /* FALLTHRU */
3453 case ASHIFTRT:
3454 if (trueop1 == CONST0_RTX (mode))
3455 return op0;
3456 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3457 return op0;
3458 /* Rotating ~0 always results in ~0; likewise, an arithmetic right shift of ~0 is ~0. */
3459 if (CONST_INT_P (trueop0)
3460 && HWI_COMPUTABLE_MODE_P (mode)
3461 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3462 && ! side_effects_p (op1))
3463 return op0;
3464
3465 canonicalize_shift:
3466 /* Given:
3467 scalar modes M1, M2
3468 scalar constants c1, c2
3469 size (M2) > size (M1)
3470 c1 == size (M2) - size (M1)
3471 optimize:
3472 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3473 <low_part>)
3474 (const_int <c2>))
3475 to:
3476 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3477 <low_part>). */
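/* As a concrete (illustrative) instance, with M1 = SImode, M2 = DImode,
   c1 = 32 and c2 = 5:
   (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32))
   <low_part>)
   (const_int 5))
   becomes
   (subreg:SI (ashiftrt:DI (reg:DI) (const_int 37)) <low_part>).  */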
3478 if ((code == ASHIFTRT || code == LSHIFTRT)
3479 && is_a <scalar_int_mode> (mode, &int_mode)
3480 && SUBREG_P (op0)
3481 && CONST_INT_P (op1)
3482 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3483 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3484 &inner_mode)
3485 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3486 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3487 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3488 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3489 && subreg_lowpart_p (op0))
3490 {
3491 rtx tmp = gen_int_shift_amount
3492 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3493 tmp = simplify_gen_binary (code, inner_mode,
3494 XEXP (SUBREG_REG (op0), 0),
3495 tmp);
3496 return lowpart_subreg (int_mode, tmp, inner_mode);
3497 }
3498
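/* On targets where SHIFT_COUNT_TRUNCATED is nonzero, reduce an
   out-of-range constant shift amount; e.g. shifting an SImode value
   by 33 is rewritten as a shift by 33 & 31 == 1.  */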
3499 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3500 {
3501 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3502 if (val != INTVAL (op1))
3503 return simplify_gen_binary (code, mode, op0,
3504 gen_int_shift_amount (mode, val));
3505 }
3506 break;
3507
3508 case ASHIFT:
3509 case SS_ASHIFT:
3510 case US_ASHIFT:
3511 if (trueop1 == CONST0_RTX (mode))
3512 return op0;
3513 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3514 return op0;
3515 goto canonicalize_shift;
3516
3517 case LSHIFTRT:
3518 if (trueop1 == CONST0_RTX (mode))
3519 return op0;
3520 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3521 return op0;
3522 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
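/* This holds when CLZ of zero is defined to be the precision P of the
   inner mode, P is a power of two and C == log2 (P): for nonzero X the
   CLZ result is at most P - 1, so bit C is set only when X is zero.  */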
3523 if (GET_CODE (op0) == CLZ
3524 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3525 && CONST_INT_P (trueop1)
3526 && STORE_FLAG_VALUE == 1
3527 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3528 {
3529 unsigned HOST_WIDE_INT zero_val = 0;
3530
3531 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3532 && zero_val == GET_MODE_PRECISION (inner_mode)
3533 && INTVAL (trueop1) == exact_log2 (zero_val))
3534 return simplify_gen_relational (EQ, mode, inner_mode,
3535 XEXP (op0, 0), const0_rtx);
3536 }
3537 goto canonicalize_shift;
3538
3539 case SMIN:
3540 if (HWI_COMPUTABLE_MODE_P (mode)
3541 && mode_signbit_p (mode, trueop1)
3542 && ! side_effects_p (op0))
3543 return op1;
3544 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3545 return op0;
3546 tem = simplify_associative_operation (code, mode, op0, op1);
3547 if (tem)
3548 return tem;
3549 break;
3550
3551 case SMAX:
3552 if (HWI_COMPUTABLE_MODE_P (mode)
3553 && CONST_INT_P (trueop1)
3554 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3555 && ! side_effects_p (op0))
3556 return op1;
3557 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3558 return op0;
3559 tem = simplify_associative_operation (code, mode, op0, op1);
3560 if (tem)
3561 return tem;
3562 break;
3563
3564 case UMIN:
3565 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3566 return op1;
3567 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3568 return op0;
3569 tem = simplify_associative_operation (code, mode, op0, op1);
3570 if (tem)
3571 return tem;
3572 break;
3573
3574 case UMAX:
3575 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3576 return op1;
3577 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3578 return op0;
3579 tem = simplify_associative_operation (code, mode, op0, op1);
3580 if (tem)
3581 return tem;
3582 break;
3583
3584 case SS_PLUS:
3585 case US_PLUS:
3586 case SS_MINUS:
3587 case US_MINUS:
3588 case SS_MULT:
3589 case US_MULT:
3590 case SS_DIV:
3591 case US_DIV:
3592 /* ??? There are simplifications that can be done. */
3593 return 0;
3594
3595 case VEC_SERIES:
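/* A VEC_SERIES with a zero step is a uniform vector; e.g.
   (vec_series:V4SI (reg:SI R) (const_int 0)) is equivalent to
   (vec_duplicate:V4SI (reg:SI R)).  */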
3596 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3597 return gen_vec_duplicate (mode, op0);
3598 if (valid_for_const_vector_p (mode, op0)
3599 && valid_for_const_vector_p (mode, op1))
3600 return gen_const_vec_series (mode, op0, op1);
3601 return 0;
3602
3603 case VEC_SELECT:
3604 if (!VECTOR_MODE_P (mode))
3605 {
3606 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3607 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3608 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3609 gcc_assert (XVECLEN (trueop1, 0) == 1);
3610 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3611
3612 if (vec_duplicate_p (trueop0, &elt0))
3613 return elt0;
3614
3615 if (GET_CODE (trueop0) == CONST_VECTOR)
3616 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3617 (trueop1, 0, 0)));
3618
3619 /* Extract a scalar element from a nested VEC_SELECT expression
3620 (with optional nested VEC_CONCAT expression). Some targets
3621 (i386) extract scalar element from a vector using chain of
3622 nested VEC_SELECT expressions. When the input operand is a memory
3623 operand, this operation can be simplified to a simple scalar
3624 load from an offset memory address. */
3625 int n_elts;
3626 if (GET_CODE (trueop0) == VEC_SELECT
3627 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3628 .is_constant (&n_elts)))
3629 {
3630 rtx op0 = XEXP (trueop0, 0);
3631 rtx op1 = XEXP (trueop0, 1);
3632
3633 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3634 int elem;
3635
3636 rtvec vec;
3637 rtx tmp_op, tmp;
3638
3639 gcc_assert (GET_CODE (op1) == PARALLEL);
3640 gcc_assert (i < n_elts);
3641
3642 /* Select the element pointed to by the nested selector. */
3643 elem = INTVAL (XVECEXP (op1, 0, i));
3644
3645 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3646 if (GET_CODE (op0) == VEC_CONCAT)
3647 {
3648 rtx op00 = XEXP (op0, 0);
3649 rtx op01 = XEXP (op0, 1);
3650
3651 machine_mode mode00, mode01;
3652 int n_elts00, n_elts01;
3653
3654 mode00 = GET_MODE (op00);
3655 mode01 = GET_MODE (op01);
3656
3657 /* Find out the number of elements of each operand.
3658 Since the concatenated result has a constant number
3659 of elements, the operands must too. */
3660 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3661 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3662
3663 gcc_assert (n_elts == n_elts00 + n_elts01);
3664
3665 /* Select correct operand of VEC_CONCAT
3666 and adjust selector. */
3667 if (elem < n_elts01)
3668 tmp_op = op00;
3669 else
3670 {
3671 tmp_op = op01;
3672 elem -= n_elts00;
3673 }
3674 }
3675 else
3676 tmp_op = op0;
3677
3678 vec = rtvec_alloc (1);
3679 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3680
3681 tmp = gen_rtx_fmt_ee (code, mode,
3682 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3683 return tmp;
3684 }
3685 }
3686 else
3687 {
3688 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3689 gcc_assert (GET_MODE_INNER (mode)
3690 == GET_MODE_INNER (GET_MODE (trueop0)));
3691 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3692
3693 if (vec_duplicate_p (trueop0, &elt0))
3694 /* It doesn't matter which elements are selected by trueop1,
3695 because they are all the same. */
3696 return gen_vec_duplicate (mode, elt0);
3697
3698 if (GET_CODE (trueop0) == CONST_VECTOR)
3699 {
3700 unsigned n_elts = XVECLEN (trueop1, 0);
3701 rtvec v = rtvec_alloc (n_elts);
3702 unsigned int i;
3703
3704 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3705 for (i = 0; i < n_elts; i++)
3706 {
3707 rtx x = XVECEXP (trueop1, 0, i);
3708
3709 gcc_assert (CONST_INT_P (x));
3710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3711 INTVAL (x));
3712 }
3713
3714 return gen_rtx_CONST_VECTOR (mode, v);
3715 }
3716
3717 /* Recognize the identity. */
3718 if (GET_MODE (trueop0) == mode)
3719 {
3720 bool maybe_ident = true;
3721 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3722 {
3723 rtx j = XVECEXP (trueop1, 0, i);
3724 if (!CONST_INT_P (j) || INTVAL (j) != i)
3725 {
3726 maybe_ident = false;
3727 break;
3728 }
3729 }
3730 if (maybe_ident)
3731 return trueop0;
3732 }
3733
3734 /* If we build {a,b} then permute it, build the result directly. */
3735 if (XVECLEN (trueop1, 0) == 2
3736 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3737 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3738 && GET_CODE (trueop0) == VEC_CONCAT
3739 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3740 && GET_MODE (XEXP (trueop0, 0)) == mode
3741 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3742 && GET_MODE (XEXP (trueop0, 1)) == mode)
3743 {
3744 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3745 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3746 rtx subop0, subop1;
3747
3748 gcc_assert (i0 < 4 && i1 < 4);
3749 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3750 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3751
3752 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3753 }
3754
3755 if (XVECLEN (trueop1, 0) == 2
3756 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3757 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3758 && GET_CODE (trueop0) == VEC_CONCAT
3759 && GET_MODE (trueop0) == mode)
3760 {
3761 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3762 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3763 rtx subop0, subop1;
3764
3765 gcc_assert (i0 < 2 && i1 < 2);
3766 subop0 = XEXP (trueop0, i0);
3767 subop1 = XEXP (trueop0, i1);
3768
3769 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3770 }
3771
3772 /* If we select one half of a vec_concat, return that. */
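/* E.g. (an illustrative case) if X and Y are V2SI,
   (vec_select:V2SI (vec_concat:V4SI X Y) (parallel [0 1])) is X,
   and (parallel [2 3]) selects Y.  */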
3773 int l0, l1;
3774 if (GET_CODE (trueop0) == VEC_CONCAT
3775 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3776 .is_constant (&l0))
3777 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3778 .is_constant (&l1))
3779 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3780 {
3781 rtx subop0 = XEXP (trueop0, 0);
3782 rtx subop1 = XEXP (trueop0, 1);
3783 machine_mode mode0 = GET_MODE (subop0);
3784 machine_mode mode1 = GET_MODE (subop1);
3785 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3786 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3787 {
3788 bool success = true;
3789 for (int i = 1; i < l0; ++i)
3790 {
3791 rtx j = XVECEXP (trueop1, 0, i);
3792 if (!CONST_INT_P (j) || INTVAL (j) != i)
3793 {
3794 success = false;
3795 break;
3796 }
3797 }
3798 if (success)
3799 return subop0;
3800 }
3801 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3802 {
3803 bool success = true;
3804 for (int i = 1; i < l1; ++i)
3805 {
3806 rtx j = XVECEXP (trueop1, 0, i);
3807 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3808 {
3809 success = false;
3810 break;
3811 }
3812 }
3813 if (success)
3814 return subop1;
3815 }
3816 }
3817 }
3818
3819 if (XVECLEN (trueop1, 0) == 1
3820 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3821 && GET_CODE (trueop0) == VEC_CONCAT)
3822 {
3823 rtx vec = trueop0;
3824 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3825
3826 /* Try to find the element in the VEC_CONCAT. */
3827 while (GET_MODE (vec) != mode
3828 && GET_CODE (vec) == VEC_CONCAT)
3829 {
3830 poly_int64 vec_size;
3831
3832 if (CONST_INT_P (XEXP (vec, 0)))
3833 {
3834 /* vec_concat of two const_ints doesn't make sense with
3835 respect to modes. */
3836 if (CONST_INT_P (XEXP (vec, 1)))
3837 return 0;
3838
3839 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3840 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3841 }
3842 else
3843 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3844
3845 if (known_lt (offset, vec_size))
3846 vec = XEXP (vec, 0);
3847 else if (known_ge (offset, vec_size))
3848 {
3849 offset -= vec_size;
3850 vec = XEXP (vec, 1);
3851 }
3852 else
3853 break;
3854 vec = avoid_constant_pool_reference (vec);
3855 }
3856
3857 if (GET_MODE (vec) == mode)
3858 return vec;
3859 }
3860
3861 /* If we select elements in a vec_merge that all come from the same
3862 operand, select from that operand directly. */
3863 if (GET_CODE (op0) == VEC_MERGE)
3864 {
3865 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3866 if (CONST_INT_P (trueop02))
3867 {
3868 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3869 bool all_operand0 = true;
3870 bool all_operand1 = true;
3871 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3872 {
3873 rtx j = XVECEXP (trueop1, 0, i);
3874 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3875 all_operand1 = false;
3876 else
3877 all_operand0 = false;
3878 }
3879 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3880 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3881 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3882 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3883 }
3884 }
3885
3886 /* If we have two nested selects that are inverses of each
3887 other, replace them with the source operand. */
3888 if (GET_CODE (trueop0) == VEC_SELECT
3889 && GET_MODE (XEXP (trueop0, 0)) == mode)
3890 {
3891 rtx op0_subop1 = XEXP (trueop0, 1);
3892 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3893 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3894
3895 /* Apply the outer ordering vector to the inner one. (The inner
3896 ordering vector is expressly permitted to be of a different
3897 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3898 then the two VEC_SELECTs cancel. */
3899 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3900 {
3901 rtx x = XVECEXP (trueop1, 0, i);
3902 if (!CONST_INT_P (x))
3903 return 0;
3904 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3905 if (!CONST_INT_P (y) || i != INTVAL (y))
3906 return 0;
3907 }
3908 return XEXP (trueop0, 0);
3909 }
3910
3911 return 0;
3912 case VEC_CONCAT:
3913 {
3914 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3915 ? GET_MODE (trueop0)
3916 : GET_MODE_INNER (mode));
3917 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3918 ? GET_MODE (trueop1)
3919 : GET_MODE_INNER (mode));
3920
3921 gcc_assert (VECTOR_MODE_P (mode));
3922 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3923 + GET_MODE_SIZE (op1_mode),
3924 GET_MODE_SIZE (mode)));
3925
3926 if (VECTOR_MODE_P (op0_mode))
3927 gcc_assert (GET_MODE_INNER (mode)
3928 == GET_MODE_INNER (op0_mode));
3929 else
3930 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3931
3932 if (VECTOR_MODE_P (op1_mode))
3933 gcc_assert (GET_MODE_INNER (mode)
3934 == GET_MODE_INNER (op1_mode));
3935 else
3936 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3937
3938 unsigned int n_elts, in_n_elts;
3939 if ((GET_CODE (trueop0) == CONST_VECTOR
3940 || CONST_SCALAR_INT_P (trueop0)
3941 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3942 && (GET_CODE (trueop1) == CONST_VECTOR
3943 || CONST_SCALAR_INT_P (trueop1)
3944 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3945 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3946 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3947 {
3948 rtvec v = rtvec_alloc (n_elts);
3949 unsigned int i;
3950 for (i = 0; i < n_elts; i++)
3951 {
3952 if (i < in_n_elts)
3953 {
3954 if (!VECTOR_MODE_P (op0_mode))
3955 RTVEC_ELT (v, i) = trueop0;
3956 else
3957 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3958 }
3959 else
3960 {
3961 if (!VECTOR_MODE_P (op1_mode))
3962 RTVEC_ELT (v, i) = trueop1;
3963 else
3964 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3965 i - in_n_elts);
3966 }
3967 }
3968
3969 return gen_rtx_CONST_VECTOR (mode, v);
3970 }
3971
3972 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3973 Restrict the transformation to avoid generating a VEC_SELECT with a
3974 mode unrelated to its operand. */
3975 if (GET_CODE (trueop0) == VEC_SELECT
3976 && GET_CODE (trueop1) == VEC_SELECT
3977 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3978 && GET_MODE (XEXP (trueop0, 0)) == mode)
3979 {
3980 rtx par0 = XEXP (trueop0, 1);
3981 rtx par1 = XEXP (trueop1, 1);
3982 int len0 = XVECLEN (par0, 0);
3983 int len1 = XVECLEN (par1, 0);
3984 rtvec vec = rtvec_alloc (len0 + len1);
3985 for (int i = 0; i < len0; i++)
3986 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3987 for (int i = 0; i < len1; i++)
3988 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3989 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3990 gen_rtx_PARALLEL (VOIDmode, vec));
3991 }
3992 }
3993 return 0;
3994
3995 default:
3996 gcc_unreachable ();
3997 }
3998
3999 if (mode == GET_MODE (op0)
4000 && mode == GET_MODE (op1)
4001 && vec_duplicate_p (op0, &elt0)
4002 && vec_duplicate_p (op1, &elt1))
4003 {
4004 /* Try applying the operator to ELT and see if that simplifies.
4005 We can duplicate the result if so.
4006
4007 The reason we don't use simplify_gen_binary is that it isn't
4008 necessarily a win to convert things like:
4009
4010 (plus:V (vec_duplicate:V (reg:S R1))
4011 (vec_duplicate:V (reg:S R2)))
4012
4013 to:
4014
4015 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4016
4017 The first might be done entirely in vector registers while the
4018 second might need a move between register files. */
4019 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4020 elt0, elt1);
4021 if (tem)
4022 return gen_vec_duplicate (mode, tem);
4023 }
4024
4025 return 0;
4026 }
4027
4028 rtx
4029 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4030 rtx op0, rtx op1)
4031 {
4032 if (VECTOR_MODE_P (mode)
4033 && code != VEC_CONCAT
4034 && GET_CODE (op0) == CONST_VECTOR
4035 && GET_CODE (op1) == CONST_VECTOR)
4036 {
4037 unsigned int n_elts;
4038 if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
4039 return NULL_RTX;
4040
4041 gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
4042 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4043 rtvec v = rtvec_alloc (n_elts);
4044 unsigned int i;
4045
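/* Fold element by element; e.g. a V2SI PLUS of the constant vectors
   {1, 2} and {3, 4} yields {4, 6}, provided every elementwise
   operation simplifies to a valid constant.  */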
4046 for (i = 0; i < n_elts; i++)
4047 {
4048 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4049 CONST_VECTOR_ELT (op0, i),
4050 CONST_VECTOR_ELT (op1, i));
4051 if (!x || !valid_for_const_vector_p (mode, x))
4052 return 0;
4053 RTVEC_ELT (v, i) = x;
4054 }
4055
4056 return gen_rtx_CONST_VECTOR (mode, v);
4057 }
4058
4059 if (VECTOR_MODE_P (mode)
4060 && code == VEC_CONCAT
4061 && (CONST_SCALAR_INT_P (op0)
4062 || CONST_FIXED_P (op0)
4063 || CONST_DOUBLE_AS_FLOAT_P (op0))
4064 && (CONST_SCALAR_INT_P (op1)
4065 || CONST_DOUBLE_AS_FLOAT_P (op1)
4066 || CONST_FIXED_P (op1)))
4067 {
4068 /* Both inputs have a constant number of elements, so the result
4069 must too. */
4070 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4071 rtvec v = rtvec_alloc (n_elts);
4072
4073 gcc_assert (n_elts >= 2);
4074 if (n_elts == 2)
4075 {
4076 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4077 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4078
4079 RTVEC_ELT (v, 0) = op0;
4080 RTVEC_ELT (v, 1) = op1;
4081 }
4082 else
4083 {
4084 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4085 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4086 unsigned i;
4087
4088 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4089 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4090 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4091
4092 for (i = 0; i < op0_n_elts; ++i)
4093 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4094 for (i = 0; i < op1_n_elts; ++i)
4095 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4096 }
4097
4098 return gen_rtx_CONST_VECTOR (mode, v);
4099 }
4100
4101 if (SCALAR_FLOAT_MODE_P (mode)
4102 && CONST_DOUBLE_AS_FLOAT_P (op0)
4103 && CONST_DOUBLE_AS_FLOAT_P (op1)
4104 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4105 {
4106 if (code == AND
4107 || code == IOR
4108 || code == XOR)
4109 {
4110 long tmp0[4];
4111 long tmp1[4];
4112 REAL_VALUE_TYPE r;
4113 int i;
4114
4115 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4116 GET_MODE (op0));
4117 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4118 GET_MODE (op1));
4119 for (i = 0; i < 4; i++)
4120 {
4121 switch (code)
4122 {
4123 case AND:
4124 tmp0[i] &= tmp1[i];
4125 break;
4126 case IOR:
4127 tmp0[i] |= tmp1[i];
4128 break;
4129 case XOR:
4130 tmp0[i] ^= tmp1[i];
4131 break;
4132 default:
4133 gcc_unreachable ();
4134 }
4135 }
4136 real_from_target (&r, tmp0, mode);
4137 return const_double_from_real_value (r, mode);
4138 }
4139 else
4140 {
4141 REAL_VALUE_TYPE f0, f1, value, result;
4142 const REAL_VALUE_TYPE *opr0, *opr1;
4143 bool inexact;
4144
4145 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4146 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4147
4148 if (HONOR_SNANS (mode)
4149 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4150 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4151 return 0;
4152
4153 real_convert (&f0, mode, opr0);
4154 real_convert (&f1, mode, opr1);
4155
4156 if (code == DIV
4157 && real_equal (&f1, &dconst0)
4158 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4159 return 0;
4160
4161 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4162 && flag_trapping_math
4163 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4164 {
4165 int s0 = REAL_VALUE_NEGATIVE (f0);
4166 int s1 = REAL_VALUE_NEGATIVE (f1);
4167
4168 switch (code)
4169 {
4170 case PLUS:
4171 /* Inf + -Inf = NaN plus exception. */
4172 if (s0 != s1)
4173 return 0;
4174 break;
4175 case MINUS:
4176 /* Inf - Inf = NaN plus exception. */
4177 if (s0 == s1)
4178 return 0;
4179 break;
4180 case DIV:
4181 /* Inf / Inf = NaN plus exception. */
4182 return 0;
4183 default:
4184 break;
4185 }
4186 }
4187
4188 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4189 && flag_trapping_math
4190 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4191 || (REAL_VALUE_ISINF (f1)
4192 && real_equal (&f0, &dconst0))))
4193 /* Inf * 0 = NaN plus exception. */
4194 return 0;
4195
4196 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4197 &f0, &f1);
4198 real_convert (&result, mode, &value);
4199
4200 /* Don't constant fold this floating point operation if
4201 the result has overflowed and flag_trapping_math is set. */
4202
4203 if (flag_trapping_math
4204 && MODE_HAS_INFINITIES (mode)
4205 && REAL_VALUE_ISINF (result)
4206 && !REAL_VALUE_ISINF (f0)
4207 && !REAL_VALUE_ISINF (f1))
4208 /* Overflow plus exception. */
4209 return 0;
4210
4211 /* Don't constant fold this floating point operation if the
4212 result may depend upon the run-time rounding mode and
4213 flag_rounding_math is set, or if GCC's software emulation
4214 is unable to accurately represent the result. */
4215
4216 if ((flag_rounding_math
4217 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4218 && (inexact || !real_identical (&result, &value)))
4219 return NULL_RTX;
4220
4221 return const_double_from_real_value (result, mode);
4222 }
4223 }
4224
4225 /* We can fold some multi-word operations. */
4226 scalar_int_mode int_mode;
4227 if (is_a <scalar_int_mode> (mode, &int_mode)
4228 && CONST_SCALAR_INT_P (op0)
4229 && CONST_SCALAR_INT_P (op1))
4230 {
4231 wide_int result;
4232 bool overflow;
4233 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4234 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4235
4236 #if TARGET_SUPPORTS_WIDE_INT == 0
4237 /* This assert keeps the simplification from producing a result
4238 that cannot be represented in a CONST_DOUBLE. A lot of
4239 upstream callers expect that this function never fails to
4240 simplify something, so if this check were added to the test
4241 above, the code would die later anyway. If this assert
4242 fires, you just need to make the port support wide int. */
4243 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4244 #endif
4245 switch (code)
4246 {
4247 case MINUS:
4248 result = wi::sub (pop0, pop1);
4249 break;
4250
4251 case PLUS:
4252 result = wi::add (pop0, pop1);
4253 break;
4254
4255 case MULT:
4256 result = wi::mul (pop0, pop1);
4257 break;
4258
4259 case DIV:
4260 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4261 if (overflow)
4262 return NULL_RTX;
4263 break;
4264
4265 case MOD:
4266 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4267 if (overflow)
4268 return NULL_RTX;
4269 break;
4270
4271 case UDIV:
4272 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4273 if (overflow)
4274 return NULL_RTX;
4275 break;
4276
4277 case UMOD:
4278 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4279 if (overflow)
4280 return NULL_RTX;
4281 break;
4282
4283 case AND:
4284 result = wi::bit_and (pop0, pop1);
4285 break;
4286
4287 case IOR:
4288 result = wi::bit_or (pop0, pop1);
4289 break;
4290
4291 case XOR:
4292 result = wi::bit_xor (pop0, pop1);
4293 break;
4294
4295 case SMIN:
4296 result = wi::smin (pop0, pop1);
4297 break;
4298
4299 case SMAX:
4300 result = wi::smax (pop0, pop1);
4301 break;
4302
4303 case UMIN:
4304 result = wi::umin (pop0, pop1);
4305 break;
4306
4307 case UMAX:
4308 result = wi::umax (pop0, pop1);
4309 break;
4310
4311 case LSHIFTRT:
4312 case ASHIFTRT:
4313 case ASHIFT:
4314 {
4315 wide_int wop1 = pop1;
4316 if (SHIFT_COUNT_TRUNCATED)
4317 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4318 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4319 return NULL_RTX;
4320
4321 switch (code)
4322 {
4323 case LSHIFTRT:
4324 result = wi::lrshift (pop0, wop1);
4325 break;
4326
4327 case ASHIFTRT:
4328 result = wi::arshift (pop0, wop1);
4329 break;
4330
4331 case ASHIFT:
4332 result = wi::lshift (pop0, wop1);
4333 break;
4334
4335 default:
4336 gcc_unreachable ();
4337 }
4338 break;
4339 }
4340 case ROTATE:
4341 case ROTATERT:
4342 {
4343 if (wi::neg_p (pop1))
4344 return NULL_RTX;
4345
4346 switch (code)
4347 {
4348 case ROTATE:
4349 result = wi::lrotate (pop0, pop1);
4350 break;
4351
4352 case ROTATERT:
4353 result = wi::rrotate (pop0, pop1);
4354 break;
4355
4356 default:
4357 gcc_unreachable ();
4358 }
4359 break;
4360 }
4361 default:
4362 return NULL_RTX;
4363 }
4364 return immed_wide_int_const (result, int_mode);
4365 }
4366
4367 /* Handle polynomial integers. */
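/* A poly_int constant with coefficients [A, B] stands for A + B * X,
   where X is a runtime invariant (e.g. the number of extra vector
   chunks on variable-length vector targets); only operations that
   distribute over that form are folded below.  */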
4368 if (NUM_POLY_INT_COEFFS > 1
4369 && is_a <scalar_int_mode> (mode, &int_mode)
4370 && poly_int_rtx_p (op0)
4371 && poly_int_rtx_p (op1))
4372 {
4373 poly_wide_int result;
4374 switch (code)
4375 {
4376 case PLUS:
4377 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4378 break;
4379
4380 case MINUS:
4381 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4382 break;
4383
4384 case MULT:
4385 if (CONST_SCALAR_INT_P (op1))
4386 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4387 else
4388 return NULL_RTX;
4389 break;
4390
4391 case ASHIFT:
4392 if (CONST_SCALAR_INT_P (op1))
4393 {
4394 wide_int shift = rtx_mode_t (op1, mode);
4395 if (SHIFT_COUNT_TRUNCATED)
4396 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4397 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4398 return NULL_RTX;
4399 result = wi::to_poly_wide (op0, mode) << shift;
4400 }
4401 else
4402 return NULL_RTX;
4403 break;
4404
4405 case IOR:
4406 if (!CONST_SCALAR_INT_P (op1)
4407 || !can_ior_p (wi::to_poly_wide (op0, mode),
4408 rtx_mode_t (op1, mode), &result))
4409 return NULL_RTX;
4410 break;
4411
4412 default:
4413 return NULL_RTX;
4414 }
4415 return immed_wide_int_const (result, int_mode);
4416 }
4417
4418 return NULL_RTX;
4419 }
4420
4421
4422 \f
4423 /* Return a positive integer if X should sort after Y. The value
4424 returned is 1 if and only if X and Y are both regs. */
4425
4426 static int
4427 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4428 {
4429 int result;
4430
4431 result = (commutative_operand_precedence (y)
4432 - commutative_operand_precedence (x));
4433 if (result)
4434 return result + result;
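/* (The doubling above preserves the sign of the precedence comparison
   while guaranteeing that only the REG-vs-REG case below can return
   exactly 1, which the caller treats as a mere register reordering
   rather than a canonicalization.)  */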
4435
4436 /* Group together equal REGs to do more simplification. */
4437 if (REG_P (x) && REG_P (y))
4438 return REGNO (x) > REGNO (y);
4439
4440 return 0;
4441 }
4442
4443 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4444 operands may be another PLUS or MINUS.
4445
4446 Rather than testing for specific cases, we use a brute-force method:
4447 apply all possible simplifications until no more changes occur, then
4448 rebuild the operation.
4449
4450 May return NULL_RTX when no changes were made. */
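/* For instance, (plus (minus (reg A) (reg B)) (plus (reg B) (reg C)))
   is flattened into the terms A, -B, B and C; the B terms cancel in
   the pairwise simplification loop and the remaining terms are rebuilt
   as a PLUS chain.  */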
4451
4452 static rtx
4453 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4454 rtx op1)
4455 {
4456 struct simplify_plus_minus_op_data
4457 {
4458 rtx op;
4459 short neg;
4460 } ops[16];
4461 rtx result, tem;
4462 int n_ops = 2;
4463 int changed, n_constants, canonicalized = 0;
4464 int i, j;
4465
4466 memset (ops, 0, sizeof ops);
4467
4468 /* Set up the two operands and then expand them until nothing has been
4469 changed. If we run out of room in our array, give up; this should
4470 almost never happen. */
4471
4472 ops[0].op = op0;
4473 ops[0].neg = 0;
4474 ops[1].op = op1;
4475 ops[1].neg = (code == MINUS);
4476
4477 do
4478 {
4479 changed = 0;
4480 n_constants = 0;
4481
4482 for (i = 0; i < n_ops; i++)
4483 {
4484 rtx this_op = ops[i].op;
4485 int this_neg = ops[i].neg;
4486 enum rtx_code this_code = GET_CODE (this_op);
4487
4488 switch (this_code)
4489 {
4490 case PLUS:
4491 case MINUS:
4492 if (n_ops == ARRAY_SIZE (ops))
4493 return NULL_RTX;
4494
4495 ops[n_ops].op = XEXP (this_op, 1);
4496 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4497 n_ops++;
4498
4499 ops[i].op = XEXP (this_op, 0);
4500 changed = 1;
4501 /* If this operand was negated then we will potentially
4502 canonicalize the expression. Similarly, if we don't
4503 place the operands adjacently, we're re-ordering the
4504 expression and thus might be performing a
4505 canonicalization. Ignore register re-ordering.
4506 ??? It might be better to shuffle the ops array here,
4507 but then (plus (plus (A, B), plus (C, D))) wouldn't
4508 be seen as non-canonical. */
4509 if (this_neg
4510 || (i != n_ops - 2
4511 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4512 canonicalized = 1;
4513 break;
4514
4515 case NEG:
4516 ops[i].op = XEXP (this_op, 0);
4517 ops[i].neg = ! this_neg;
4518 changed = 1;
4519 canonicalized = 1;
4520 break;
4521
4522 case CONST:
4523 if (n_ops != ARRAY_SIZE (ops)
4524 && GET_CODE (XEXP (this_op, 0)) == PLUS
4525 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4526 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4527 {
4528 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4529 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4530 ops[n_ops].neg = this_neg;
4531 n_ops++;
4532 changed = 1;
4533 canonicalized = 1;
4534 }
4535 break;
4536
4537 case NOT:
4538 /* ~a -> (-a - 1) */
4539 if (n_ops != ARRAY_SIZE (ops))
4540 {
4541 ops[n_ops].op = CONSTM1_RTX (mode);
4542 ops[n_ops++].neg = this_neg;
4543 ops[i].op = XEXP (this_op, 0);
4544 ops[i].neg = !this_neg;
4545 changed = 1;
4546 canonicalized = 1;
4547 }
4548 break;
4549
4550 case CONST_INT:
4551 n_constants++;
4552 if (this_neg)
4553 {
4554 ops[i].op = neg_const_int (mode, this_op);
4555 ops[i].neg = 0;
4556 changed = 1;
4557 canonicalized = 1;
4558 }
4559 break;
4560
4561 default:
4562 break;
4563 }
4564 }
4565 }
4566 while (changed);
4567
4568 if (n_constants > 1)
4569 canonicalized = 1;
4570
4571 gcc_assert (n_ops >= 2);
4572
4573 /* If we only have two operands, we can avoid the loops. */
4574 if (n_ops == 2)
4575 {
4576 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4577 rtx lhs, rhs;
4578
4579 /* Get the two operands. Be careful with the order, especially for
4580 the cases where code == MINUS. */
4581 if (ops[0].neg && ops[1].neg)
4582 {
4583 lhs = gen_rtx_NEG (mode, ops[0].op);
4584 rhs = ops[1].op;
4585 }
4586 else if (ops[0].neg)
4587 {
4588 lhs = ops[1].op;
4589 rhs = ops[0].op;
4590 }
4591 else
4592 {
4593 lhs = ops[0].op;
4594 rhs = ops[1].op;
4595 }
4596
4597 return simplify_const_binary_operation (code, mode, lhs, rhs);
4598 }
4599
4600 /* Now simplify each pair of operands until nothing changes. */
4601 while (1)
4602 {
4603 /* Insertion sort is good enough for a small array. */
4604 for (i = 1; i < n_ops; i++)
4605 {
4606 struct simplify_plus_minus_op_data save;
4607 int cmp;
4608
4609 j = i - 1;
4610 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4611 if (cmp <= 0)
4612 continue;
4613 /* Just swapping registers doesn't count as canonicalization. */
4614 if (cmp != 1)
4615 canonicalized = 1;
4616
4617 save = ops[i];
4618 do
4619 ops[j + 1] = ops[j];
4620 while (j--
4621 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4622 ops[j + 1] = save;
4623 }
4624
4625 changed = 0;
4626 for (i = n_ops - 1; i > 0; i--)
4627 for (j = i - 1; j >= 0; j--)
4628 {
4629 rtx lhs = ops[j].op, rhs = ops[i].op;
4630 int lneg = ops[j].neg, rneg = ops[i].neg;
4631
4632 if (lhs != 0 && rhs != 0)
4633 {
4634 enum rtx_code ncode = PLUS;
4635
4636 if (lneg != rneg)
4637 {
4638 ncode = MINUS;
4639 if (lneg)
4640 std::swap (lhs, rhs);
4641 }
4642 else if (swap_commutative_operands_p (lhs, rhs))
4643 std::swap (lhs, rhs);
4644
4645 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4646 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4647 {
4648 rtx tem_lhs, tem_rhs;
4649
4650 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4651 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4652 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4653 tem_rhs);
4654
4655 if (tem && !CONSTANT_P (tem))
4656 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4657 }
4658 else
4659 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4660
4661 if (tem)
4662 {
4663 /* Reject "simplifications" that just wrap the two
4664 arguments in a CONST. Failure to do so can result
4665 in infinite recursion with simplify_binary_operation
4666 when it calls us to simplify CONST operations.
4667 Also, if we find such a simplification, don't try
4668 any more combinations with this rhs: we must have
4669 something like symbol+offset, i.e. one of the
4670 trivial CONST expressions we handle later. */
4671 if (GET_CODE (tem) == CONST
4672 && GET_CODE (XEXP (tem, 0)) == ncode
4673 && XEXP (XEXP (tem, 0), 0) == lhs
4674 && XEXP (XEXP (tem, 0), 1) == rhs)
4675 break;
4676 lneg &= rneg;
4677 if (GET_CODE (tem) == NEG)
4678 tem = XEXP (tem, 0), lneg = !lneg;
4679 if (CONST_INT_P (tem) && lneg)
4680 tem = neg_const_int (mode, tem), lneg = 0;
4681
4682 ops[i].op = tem;
4683 ops[i].neg = lneg;
4684 ops[j].op = NULL_RTX;
4685 changed = 1;
4686 canonicalized = 1;
4687 }
4688 }
4689 }
4690
4691 if (!changed)
4692 break;
4693
4694 /* Pack all the operands to the lower-numbered entries. */
4695 for (i = 0, j = 0; j < n_ops; j++)
4696 if (ops[j].op)
4697 {
4698 ops[i] = ops[j];
4699 i++;
4700 }
4701 n_ops = i;
4702 }
4703
4704 /* If nothing changed, check that rematerialization of rtl instructions
4705 is still required. */
4706 if (!canonicalized)
4707 {
4708 /* Perform rematerialization only if all operands are registers and
4709 all operations are PLUS. */
4710 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4711 around rs6000 and how it uses the CA register. See PR67145. */
4712 for (i = 0; i < n_ops; i++)
4713 if (ops[i].neg
4714 || !REG_P (ops[i].op)
4715 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4716 && fixed_regs[REGNO (ops[i].op)]
4717 && !global_regs[REGNO (ops[i].op)]
4718 && ops[i].op != frame_pointer_rtx
4719 && ops[i].op != arg_pointer_rtx
4720 && ops[i].op != stack_pointer_rtx))
4721 return NULL_RTX;
4722 goto gen_result;
4723 }
4724
4725 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4726 if (n_ops == 2
4727 && CONST_INT_P (ops[1].op)
4728 && CONSTANT_P (ops[0].op)
4729 && ops[0].neg)
4730 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4731
4732 /* We suppressed creation of trivial CONST expressions in the
4733 combination loop to avoid recursion. Create one manually now.
4734 The combination loop should have ensured that there is exactly
4735 one CONST_INT, and the sort will have ensured that it is last
4736 in the array and that any other constant will be next-to-last. */
4737
4738 if (n_ops > 1
4739 && CONST_INT_P (ops[n_ops - 1].op)
4740 && CONSTANT_P (ops[n_ops - 2].op))
4741 {
4742 rtx value = ops[n_ops - 1].op;
4743 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4744 value = neg_const_int (mode, value);
4745 if (CONST_INT_P (value))
4746 {
4747 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4748 INTVAL (value));
4749 n_ops--;
4750 }
4751 }
4752
4753 /* Put a non-negated operand first, if possible. */
4754
4755 for (i = 0; i < n_ops && ops[i].neg; i++)
4756 continue;
4757 if (i == n_ops)
4758 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4759 else if (i != 0)
4760 {
4761 tem = ops[0].op;
4762 ops[0] = ops[i];
4763 ops[i].op = tem;
4764 ops[i].neg = 1;
4765 }
4766
4767 /* Now make the result by performing the requested operations. */
4768 gen_result:
4769 result = ops[0].op;
4770 for (i = 1; i < n_ops; i++)
4771 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4772 mode, result, ops[i].op);
4773
4774 return result;
4775 }
4776
4777 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4778 static bool
4779 plus_minus_operand_p (const_rtx x)
4780 {
4781 return GET_CODE (x) == PLUS
4782 || GET_CODE (x) == MINUS
4783 || (GET_CODE (x) == CONST
4784 && GET_CODE (XEXP (x, 0)) == PLUS
4785 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4786 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4787 }
4788
4789 /* Like simplify_binary_operation except used for relational operators.
4790 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4791 not both be VOIDmode as well.
4792
4793 CMP_MODE specifies the mode in which the comparison is done, so it is
4794 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4795 the operands or, if both are VOIDmode, the operands are compared in
4796 "infinite precision". */
4797 rtx
4798 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4799 machine_mode cmp_mode, rtx op0, rtx op1)
4800 {
4801 rtx tem, trueop0, trueop1;
4802
4803 if (cmp_mode == VOIDmode)
4804 cmp_mode = GET_MODE (op0);
4805 if (cmp_mode == VOIDmode)
4806 cmp_mode = GET_MODE (op1);
4807
4808 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4809 if (tem)
4810 {
4811 if (SCALAR_FLOAT_MODE_P (mode))
4812 {
4813 if (tem == const0_rtx)
4814 return CONST0_RTX (mode);
4815 #ifdef FLOAT_STORE_FLAG_VALUE
4816 {
4817 REAL_VALUE_TYPE val;
4818 val = FLOAT_STORE_FLAG_VALUE (mode);
4819 return const_double_from_real_value (val, mode);
4820 }
4821 #else
4822 return NULL_RTX;
4823 #endif
4824 }
4825 if (VECTOR_MODE_P (mode))
4826 {
4827 if (tem == const0_rtx)
4828 return CONST0_RTX (mode);
4829 #ifdef VECTOR_STORE_FLAG_VALUE
4830 {
4831 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4832 if (val == NULL_RTX)
4833 return NULL_RTX;
4834 if (val == const1_rtx)
4835 return CONST1_RTX (mode);
4836
4837 return gen_const_vec_duplicate (mode, val);
4838 }
4839 #else
4840 return NULL_RTX;
4841 #endif
4842 }
4843
4844 return tem;
4845 }
4846
4847 /* For the following tests, ensure const0_rtx is op1. */
4848 if (swap_commutative_operands_p (op0, op1)
4849 || (op0 == const0_rtx && op1 != const0_rtx))
4850 std::swap (op0, op1), code = swap_condition (code);
4851
4852 /* If op0 is a compare, extract the comparison arguments from it. */
4853 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4854 return simplify_gen_relational (code, mode, VOIDmode,
4855 XEXP (op0, 0), XEXP (op0, 1));
4856
4857 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4858 || CC0_P (op0))
4859 return NULL_RTX;
4860
4861 trueop0 = avoid_constant_pool_reference (op0);
4862 trueop1 = avoid_constant_pool_reference (op1);
4863 return simplify_relational_operation_1 (code, mode, cmp_mode,
4864 trueop0, trueop1);
4865 }
4866
4867 /* This part of simplify_relational_operation is only used when CMP_MODE
4868 is not in class MODE_CC (i.e. it is a real comparison).
4869
4870 MODE is the mode of the result, while CMP_MODE specifies the mode
4871 in which the comparison is done, so it is the mode of the operands. */
4872
4873 static rtx
4874 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4875 machine_mode cmp_mode, rtx op0, rtx op1)
4876 {
4877 enum rtx_code op0code = GET_CODE (op0);
4878
4879 if (op1 == const0_rtx && COMPARISON_P (op0))
4880 {
4881 /* If op0 is a comparison, extract the comparison arguments
4882 from it. */
4883 if (code == NE)
4884 {
4885 if (GET_MODE (op0) == mode)
4886 return simplify_rtx (op0);
4887 else
4888 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4889 XEXP (op0, 0), XEXP (op0, 1));
4890 }
4891 else if (code == EQ)
4892 {
4893 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4894 if (new_code != UNKNOWN)
4895 return simplify_gen_relational (new_code, mode, VOIDmode,
4896 XEXP (op0, 0), XEXP (op0, 1));
4897 }
4898 }
4899
4900 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4901 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
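/* E.g. (ltu:SI (plus:SI A (const_int 4)) (const_int 4)) becomes
   (geu:SI A (const_int -4)): the sum is below 4 exactly when the
   addition wrapped around.  */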
4902 if ((code == LTU || code == GEU)
4903 && GET_CODE (op0) == PLUS
4904 && CONST_INT_P (XEXP (op0, 1))
4905 && (rtx_equal_p (op1, XEXP (op0, 0))
4906 || rtx_equal_p (op1, XEXP (op0, 1)))
4907 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4908 && XEXP (op0, 1) != const0_rtx)
4909 {
4910 rtx new_cmp
4911 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4912 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4913 cmp_mode, XEXP (op0, 0), new_cmp);
4914 }
4915
4916 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4917 transformed into (LTU a -C). */
4918 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4919 && CONST_INT_P (XEXP (op0, 1))
4920 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4921 && XEXP (op0, 1) != const0_rtx)
4922 {
4923 rtx new_cmp
4924 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4925 return simplify_gen_relational (LTU, mode, cmp_mode,
4926 XEXP (op0, 0), new_cmp);
4927 }
4928
4929 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4930 if ((code == LTU || code == GEU)
4931 && GET_CODE (op0) == PLUS
4932 && rtx_equal_p (op1, XEXP (op0, 1))
4933 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4934 && !rtx_equal_p (op1, XEXP (op0, 0)))
4935 return simplify_gen_relational (code, mode, cmp_mode, op0,
4936 copy_rtx (XEXP (op0, 0)));
4937
4938 if (op1 == const0_rtx)
4939 {
4940 /* Canonicalize (GTU x 0) as (NE x 0). */
4941 if (code == GTU)
4942 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4943 /* Canonicalize (LEU x 0) as (EQ x 0). */
4944 if (code == LEU)
4945 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4946 }
4947 else if (op1 == const1_rtx)
4948 {
4949 switch (code)
4950 {
4951 case GE:
4952 /* Canonicalize (GE x 1) as (GT x 0). */
4953 return simplify_gen_relational (GT, mode, cmp_mode,
4954 op0, const0_rtx);
4955 case GEU:
4956 /* Canonicalize (GEU x 1) as (NE x 0). */
4957 return simplify_gen_relational (NE, mode, cmp_mode,
4958 op0, const0_rtx);
4959 case LT:
4960 /* Canonicalize (LT x 1) as (LE x 0). */
4961 return simplify_gen_relational (LE, mode, cmp_mode,
4962 op0, const0_rtx);
4963 case LTU:
4964 /* Canonicalize (LTU x 1) as (EQ x 0). */
4965 return simplify_gen_relational (EQ, mode, cmp_mode,
4966 op0, const0_rtx);
4967 default:
4968 break;
4969 }
4970 }
4971 else if (op1 == constm1_rtx)
4972 {
4973 /* Canonicalize (LE x -1) as (LT x 0). */
4974 if (code == LE)
4975 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4976 /* Canonicalize (GT x -1) as (GE x 0). */
4977 if (code == GT)
4978 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4979 }
4980
4981 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
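/* E.g. (eq:SI (plus:SI x (const_int 3)) (const_int 7)) becomes
   (eq:SI x (const_int 4)).  */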
4982 if ((code == EQ || code == NE)
4983 && (op0code == PLUS || op0code == MINUS)
4984 && CONSTANT_P (op1)
4985 && CONSTANT_P (XEXP (op0, 1))
4986 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4987 {
4988 rtx x = XEXP (op0, 0);
4989 rtx c = XEXP (op0, 1);
4990 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4991 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4992
4993 /* Detect an infinite recursive condition, where we oscillate at this
4994 simplification case between:
4995 A + B == C <---> C - B == A,
4996 where A, B, and C are all constants with non-simplifiable expressions,
4997 usually SYMBOL_REFs. */
4998 if (GET_CODE (tem) == invcode
4999 && CONSTANT_P (x)
5000 && rtx_equal_p (c, XEXP (tem, 1)))
5001 return NULL_RTX;
5002
5003 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5004 }
5005
5006 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5007 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5008 scalar_int_mode int_mode, int_cmp_mode;
5009 if (code == NE
5010 && op1 == const0_rtx
5011 && is_int_mode (mode, &int_mode)
5012 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5013 /* ??? Work-around BImode bugs in the ia64 backend. */
5014 && int_mode != BImode
5015 && int_cmp_mode != BImode
5016 && nonzero_bits (op0, int_cmp_mode) == 1
5017 && STORE_FLAG_VALUE == 1)
5018 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5019 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5020 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5021
5022 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5023 if ((code == EQ || code == NE)
5024 && op1 == const0_rtx
5025 && op0code == XOR)
5026 return simplify_gen_relational (code, mode, cmp_mode,
5027 XEXP (op0, 0), XEXP (op0, 1));
5028
5029 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5030 if ((code == EQ || code == NE)
5031 && op0code == XOR
5032 && rtx_equal_p (XEXP (op0, 0), op1)
5033 && !side_effects_p (XEXP (op0, 0)))
5034 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5035 CONST0_RTX (mode));
5036
5037 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5038 if ((code == EQ || code == NE)
5039 && op0code == XOR
5040 && rtx_equal_p (XEXP (op0, 1), op1)
5041 && !side_effects_p (XEXP (op0, 1)))
5042 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5043 CONST0_RTX (mode));
5044
5045 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5046 if ((code == EQ || code == NE)
5047 && op0code == XOR
5048 && CONST_SCALAR_INT_P (op1)
5049 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5050 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5051 simplify_gen_binary (XOR, cmp_mode,
5052 XEXP (op0, 1), op1));
5053
5054 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
5055 can be implemented with a BICS instruction on some targets, or
5056 constant-folded if y is a constant. */
5057 if ((code == EQ || code == NE)
5058 && op0code == AND
5059 && rtx_equal_p (XEXP (op0, 0), op1)
5060 && !side_effects_p (op1)
5061 && op1 != CONST0_RTX (cmp_mode))
5062 {
5063 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
5064 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5065
5066 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5067 CONST0_RTX (cmp_mode));
5068 }
5069
5070 /* Likewise for (eq/ne (and x y) y). */
5071 if ((code == EQ || code == NE)
5072 && op0code == AND
5073 && rtx_equal_p (XEXP (op0, 1), op1)
5074 && !side_effects_p (op1)
5075 && op1 != CONST0_RTX (cmp_mode))
5076 {
5077 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
5078 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5079
5080 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5081 CONST0_RTX (cmp_mode));
5082 }
5083
5084 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5085 if ((code == EQ || code == NE)
5086 && GET_CODE (op0) == BSWAP
5087 && CONST_SCALAR_INT_P (op1))
5088 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5089 simplify_gen_unary (BSWAP, cmp_mode,
5090 op1, cmp_mode));
5091
5092 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5093 if ((code == EQ || code == NE)
5094 && GET_CODE (op0) == BSWAP
5095 && GET_CODE (op1) == BSWAP)
5096 return simplify_gen_relational (code, mode, cmp_mode,
5097 XEXP (op0, 0), XEXP (op1, 0));
5098
5099 if (op0code == POPCOUNT && op1 == const0_rtx)
5100 switch (code)
5101 {
5102 case EQ:
5103 case LE:
5104 case LEU:
5105 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5106 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5107 XEXP (op0, 0), const0_rtx);
5108
5109 case NE:
5110 case GT:
5111 case GTU:
5112 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5113 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5114 XEXP (op0, 0), const0_rtx);
5115
5116 default:
5117 break;
5118 }
5119
5120 return NULL_RTX;
5121 }
5122
5123 enum
5124 {
5125 CMP_EQ = 1,
5126 CMP_LT = 2,
5127 CMP_GT = 4,
5128 CMP_LTU = 8,
5129 CMP_GTU = 16
5130 };
5131
5132
5133 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5134 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5135 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5136 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5137 For floating-point comparisons, assume that the operands were ordered. */
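/* For example, when the operands are known to be equal, KNOWN_RESULTS
   is CMP_EQ and codes such as EQ, LE and GEU fold to const_true_rtx,
   while NE, LT and GTU fold to const0_rtx.  */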
5138
5139 static rtx
5140 comparison_result (enum rtx_code code, int known_results)
5141 {
5142 switch (code)
5143 {
5144 case EQ:
5145 case UNEQ:
5146 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5147 case NE:
5148 case LTGT:
5149 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5150
5151 case LT:
5152 case UNLT:
5153 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5154 case GE:
5155 case UNGE:
5156 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5157
5158 case GT:
5159 case UNGT:
5160 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5161 case LE:
5162 case UNLE:
5163 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5164
5165 case LTU:
5166 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5167 case GEU:
5168 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5169
5170 case GTU:
5171 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5172 case LEU:
5173 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5174
5175 case ORDERED:
5176 return const_true_rtx;
5177 case UNORDERED:
5178 return const0_rtx;
5179 default:
5180 gcc_unreachable ();
5181 }
5182 }
5183
5184 /* Check if the given comparison (done in the given MODE) is actually
5185 a tautology or a contradiction. If the mode is VOIDmode, the
5186 comparison is done in "infinite precision". If no simplification
5187 is possible, this function returns zero. Otherwise, it returns
5188 either const_true_rtx or const0_rtx. */
5189
5190 rtx
5191 simplify_const_relational_operation (enum rtx_code code,
5192 machine_mode mode,
5193 rtx op0, rtx op1)
5194 {
5195 rtx tem;
5196 rtx trueop0;
5197 rtx trueop1;
5198
5199 gcc_assert (mode != VOIDmode
5200 || (GET_MODE (op0) == VOIDmode
5201 && GET_MODE (op1) == VOIDmode));
5202
5203 /* If op0 is a compare, extract the comparison arguments from it. */
5204 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5205 {
5206 op1 = XEXP (op0, 1);
5207 op0 = XEXP (op0, 0);
5208
5209 if (GET_MODE (op0) != VOIDmode)
5210 mode = GET_MODE (op0);
5211 else if (GET_MODE (op1) != VOIDmode)
5212 mode = GET_MODE (op1);
5213 else
5214 return 0;
5215 }
5216
5217 /* We can't simplify MODE_CC values since we don't know what the
5218 actual comparison is. */
5219 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5220 return 0;
5221
5222 /* Make sure the constant is second. */
5223 if (swap_commutative_operands_p (op0, op1))
5224 {
5225 std::swap (op0, op1);
5226 code = swap_condition (code);
5227 }
5228
5229 trueop0 = avoid_constant_pool_reference (op0);
5230 trueop1 = avoid_constant_pool_reference (op1);
5231
5232 /* For integer comparisons of A and B maybe we can simplify A - B and can
5233 then simplify a comparison of that with zero. If A and B are both either
5234 a register or a CONST_INT, this can't help; testing for these cases will
5235 prevent infinite recursion here and speed things up.
5236
5237 We can only do this for EQ and NE comparisons as otherwise we may
5238 lose or introduce overflow which we cannot disregard as undefined as
5239 we do not know the signedness of the operation on either the left or
5240 the right hand side of the comparison. */
5241
5242 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5243 && (code == EQ || code == NE)
5244 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5245 && (REG_P (op1) || CONST_INT_P (trueop1)))
5246 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5247 /* We cannot do this if tem is a nonzero address. */
5248 && ! nonzero_address_p (tem))
5249 return simplify_const_relational_operation (signed_condition (code),
5250 mode, tem, const0_rtx);
5251
5252 if (! HONOR_NANS (mode) && code == ORDERED)
5253 return const_true_rtx;
5254
5255 if (! HONOR_NANS (mode) && code == UNORDERED)
5256 return const0_rtx;
5257
5258 /* For modes without NaNs, if the two operands are equal, we know the
5259 result except if they have side-effects. Even with NaNs we know
5260 the result of unordered comparisons and, if signaling NaNs are
5261 irrelevant, also the result of LT/GT/LTGT. */
5262 if ((! HONOR_NANS (trueop0)
5263 || code == UNEQ || code == UNLE || code == UNGE
5264 || ((code == LT || code == GT || code == LTGT)
5265 && ! HONOR_SNANS (trueop0)))
5266 && rtx_equal_p (trueop0, trueop1)
5267 && ! side_effects_p (trueop0))
5268 return comparison_result (code, CMP_EQ);
5269
5270 /* If the operands are floating-point constants, see if we can fold
5271 the result. */
5272 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5273 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5274 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5275 {
5276 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5277 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5278
5279 /* Comparisons are unordered iff at least one of the values is NaN. */
5280 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5281 switch (code)
5282 {
5283 case UNEQ:
5284 case UNLT:
5285 case UNGT:
5286 case UNLE:
5287 case UNGE:
5288 case NE:
5289 case UNORDERED:
5290 return const_true_rtx;
5291 case EQ:
5292 case LT:
5293 case GT:
5294 case LE:
5295 case GE:
5296 case LTGT:
5297 case ORDERED:
5298 return const0_rtx;
5299 default:
5300 return 0;
5301 }
5302
5303 return comparison_result (code,
5304 (real_equal (d0, d1) ? CMP_EQ :
5305 real_less (d0, d1) ? CMP_LT : CMP_GT));
5306 }
5307
5308 /* Otherwise, see if the operands are both integers. */
5309 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5310 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5311 {
5312 /* It would be nice if we really had a mode here. However, the
5313 largest int representable on the target is as good as
5314 infinite. */
5315 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5316 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5317 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5318
5319 if (wi::eq_p (ptrueop0, ptrueop1))
5320 return comparison_result (code, CMP_EQ);
5321 else
5322 {
5323 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5324 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5325 return comparison_result (code, cr);
5326 }
5327 }
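  /* Example of the signed/unsigned split above: for trueop0 == -1 and
     trueop1 == 1 the result is CMP_LT | CMP_GTU, so (lt -1 1) folds to
     true while (ltu -1 1) folds to false, -1 being the maximum unsigned
     value.  */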
5328
5329 /* Optimize comparisons with upper and lower bounds. */
5330 scalar_int_mode int_mode;
5331 if (CONST_INT_P (trueop1)
5332 && is_a <scalar_int_mode> (mode, &int_mode)
5333 && HWI_COMPUTABLE_MODE_P (int_mode)
5334 && !side_effects_p (trueop0))
5335 {
5336 int sign;
5337 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5338 HOST_WIDE_INT val = INTVAL (trueop1);
5339 HOST_WIDE_INT mmin, mmax;
5340
5341 if (code == GEU
5342 || code == LEU
5343 || code == GTU
5344 || code == LTU)
5345 sign = 0;
5346 else
5347 sign = 1;
5348
5349 /* Get a reduced range if the sign bit is zero. */
5350 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5351 {
5352 mmin = 0;
5353 mmax = nonzero;
5354 }
5355 else
5356 {
5357 rtx mmin_rtx, mmax_rtx;
5358 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5359
5360 mmin = INTVAL (mmin_rtx);
5361 mmax = INTVAL (mmax_rtx);
5362 if (sign)
5363 {
5364 unsigned int sign_copies
5365 = num_sign_bit_copies (trueop0, int_mode);
5366
5367 mmin >>= (sign_copies - 1);
5368 mmax >>= (sign_copies - 1);
5369 }
5370 }
5371
5372 switch (code)
5373 {
5374 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5375 case GEU:
5376 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5377 return const_true_rtx;
5378 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5379 return const0_rtx;
5380 break;
5381 case GE:
5382 if (val <= mmin)
5383 return const_true_rtx;
5384 if (val > mmax)
5385 return const0_rtx;
5386 break;
5387
5388 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5389 case LEU:
5390 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5391 return const_true_rtx;
5392 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5393 return const0_rtx;
5394 break;
5395 case LE:
5396 if (val >= mmax)
5397 return const_true_rtx;
5398 if (val < mmin)
5399 return const0_rtx;
5400 break;
5401
5402 case EQ:
5403 /* x == y is always false for y out of range. */
5404 if (val < mmin || val > mmax)
5405 return const0_rtx;
5406 break;
5407
5408 /* x > y is always false for y >= mmax, always true for y < mmin. */
5409 case GTU:
5410 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5411 return const0_rtx;
5412 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5413 return const_true_rtx;
5414 break;
5415 case GT:
5416 if (val >= mmax)
5417 return const0_rtx;
5418 if (val < mmin)
5419 return const_true_rtx;
5420 break;
5421
5422 /* x < y is always false for y <= mmin, always true for y > mmax. */
5423 case LTU:
5424 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5425 return const0_rtx;
5426 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5427 return const_true_rtx;
5428 break;
5429 case LT:
5430 if (val <= mmin)
5431 return const0_rtx;
5432 if (val > mmax)
5433 return const_true_rtx;
5434 break;
5435
5436 case NE:
5437 /* x != y is always true for y out of range. */
5438 if (val < mmin || val > mmax)
5439 return const_true_rtx;
5440 break;
5441
5442 default:
5443 break;
5444 }
5445 }
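  /* A worked example of the bounds checks above: if nonzero_bits shows that
     only the low four bits of trueop0 can be set, then mmax == 15, so
     (gtu trueop0 (const_int 15)) folds to const0_rtx and
     (leu trueop0 (const_int 15)) folds to const_true_rtx.  */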
5446
5447 /* Optimize integer comparisons with zero. */
5448 if (is_a <scalar_int_mode> (mode, &int_mode)
5449 && trueop1 == const0_rtx
5450 && !side_effects_p (trueop0))
5451 {
5452 /* Some addresses are known to be nonzero. We don't know
5453 	 their sign, but the result of equality comparisons is known.  */
5454 if (nonzero_address_p (trueop0))
5455 {
5456 if (code == EQ || code == LEU)
5457 return const0_rtx;
5458 if (code == NE || code == GTU)
5459 return const_true_rtx;
5460 }
5461
5462 /* See if the first operand is an IOR with a constant. If so, we
5463 may be able to determine the result of this comparison. */
5464 if (GET_CODE (op0) == IOR)
5465 {
5466 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5467 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5468 {
5469 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5470 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5471 && (UINTVAL (inner_const)
5472 & (HOST_WIDE_INT_1U
5473 << sign_bitnum)));
5474
5475 switch (code)
5476 {
5477 case EQ:
5478 case LEU:
5479 return const0_rtx;
5480 case NE:
5481 case GTU:
5482 return const_true_rtx;
5483 case LT:
5484 case LE:
5485 if (has_sign)
5486 return const_true_rtx;
5487 break;
5488 case GT:
5489 case GE:
5490 if (has_sign)
5491 return const0_rtx;
5492 break;
5493 default:
5494 break;
5495 }
5496 }
5497 }
5498 }
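  /* Two illustrative cases for the zero-comparison block above: if trueop0
     is an address that nonzero_address_p proves nonzero, (eq addr 0) folds
     to const0_rtx and (ne addr 0) to const_true_rtx; likewise
     (eq (ior x (const_int 4)) (const_int 0)) folds to const0_rtx, since the
     IOR guarantees at least one nonzero bit.  */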
5499
5500 /* Optimize comparison of ABS with zero. */
5501 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5502 && (GET_CODE (trueop0) == ABS
5503 || (GET_CODE (trueop0) == FLOAT_EXTEND
5504 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5505 {
5506 switch (code)
5507 {
5508 case LT:
5509 /* Optimize abs(x) < 0.0. */
5510 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5511 return const0_rtx;
5512 break;
5513
5514 case GE:
5515 /* Optimize abs(x) >= 0.0. */
5516 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5517 return const_true_rtx;
5518 break;
5519
5520 case UNGE:
5521 /* Optimize ! (abs(x) < 0.0). */
5522 return const_true_rtx;
5523
5524 default:
5525 break;
5526 }
5527 }
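  /* For example, when SFmode honors neither NaNs nor signaling NaNs,
     (lt (abs:SF x) (const_double 0.0)) folds to const0_rtx and
     (ge (abs:SF x) (const_double 0.0)) to const_true_rtx, while
     (unge (abs:SF x) (const_double 0.0)) is const_true_rtx unconditionally.  */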
5528
5529 return 0;
5530 }
5531
5532 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5533 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5534    or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the expression
5535    can be simplified to that, or NULL_RTX if not.
5536 Assume X is compared against zero with CMP_CODE and the true
5537 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5538
5539 static rtx
5540 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5541 {
5542 if (cmp_code != EQ && cmp_code != NE)
5543 return NULL_RTX;
5544
5545   /* Result for X == 0 and X != 0 respectively.  */
5546 rtx on_zero, on_nonzero;
5547 if (cmp_code == EQ)
5548 {
5549 on_zero = true_val;
5550 on_nonzero = false_val;
5551 }
5552 else
5553 {
5554 on_zero = false_val;
5555 on_nonzero = true_val;
5556 }
5557
5558 rtx_code op_code = GET_CODE (on_nonzero);
5559 if ((op_code != CLZ && op_code != CTZ)
5560 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5561 || !CONST_INT_P (on_zero))
5562 return NULL_RTX;
5563
5564 HOST_WIDE_INT op_val;
5565 scalar_int_mode mode ATTRIBUTE_UNUSED
5566 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5567 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5568 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5569 && op_val == INTVAL (on_zero))
5570 return on_nonzero;
5571
5572 return NULL_RTX;
5573 }
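/* A sketch of the simplification above, assuming a hypothetical target whose
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode:

     (if_then_else (eq (reg:SI x) (const_int 0))
                   (const_int 32)
                   (clz:SI (reg:SI x)))

   would simplify to (clz:SI (reg:SI x)), because the CLZ pattern already
   produces 32 for a zero input on such a target.  */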
5574
5575 \f
5576 /* Simplify CODE, an operation with result mode MODE and three operands,
5577 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5578    a constant.  Return 0 if no simplification is possible.  */
5579
5580 rtx
5581 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5582 machine_mode op0_mode, rtx op0, rtx op1,
5583 rtx op2)
5584 {
5585 bool any_change = false;
5586 rtx tem, trueop2;
5587 scalar_int_mode int_mode, int_op0_mode;
5588 unsigned int n_elts;
5589
5590 switch (code)
5591 {
5592 case FMA:
5593 /* Simplify negations around the multiplication. */
5594 /* -a * -b + c => a * b + c. */
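      /* An illustrative sketch: (fma (neg x) (const_double -2.0) y)
         can become (fma x (const_double 2.0) y), because negating the
         constant operand succeeds and the explicit NEG can be dropped.  */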
5595 if (GET_CODE (op0) == NEG)
5596 {
5597 tem = simplify_unary_operation (NEG, mode, op1, mode);
5598 if (tem)
5599 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5600 }
5601 else if (GET_CODE (op1) == NEG)
5602 {
5603 tem = simplify_unary_operation (NEG, mode, op0, mode);
5604 if (tem)
5605 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5606 }
5607
5608 /* Canonicalize the two multiplication operands. */
5609 /* a * -b + c => -b * a + c. */
5610 if (swap_commutative_operands_p (op0, op1))
5611 std::swap (op0, op1), any_change = true;
5612
5613 if (any_change)
5614 return gen_rtx_FMA (mode, op0, op1, op2);
5615 return NULL_RTX;
5616
5617 case SIGN_EXTRACT:
5618 case ZERO_EXTRACT:
5619 if (CONST_INT_P (op0)
5620 && CONST_INT_P (op1)
5621 && CONST_INT_P (op2)
5622 && is_a <scalar_int_mode> (mode, &int_mode)
5623 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5624 && HWI_COMPUTABLE_MODE_P (int_mode))
5625 {
5626 	  /* Extracting a bit-field from a constant.  */
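          /* A worked example, assuming !BITS_BIG_ENDIAN:
             (zero_extract:QI (const_int 0x9c) (const_int 4) (const_int 4))
             folds to (const_int 9), while the corresponding SIGN_EXTRACT
             folds to (const_int -7) because bit 3 of the extracted field is
             set and is propagated as the sign.  */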
5627 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5628 HOST_WIDE_INT op1val = INTVAL (op1);
5629 HOST_WIDE_INT op2val = INTVAL (op2);
5630 if (!BITS_BIG_ENDIAN)
5631 val >>= op2val;
5632 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5633 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5634 else
5635 /* Not enough information to calculate the bit position. */
5636 break;
5637
5638 if (HOST_BITS_PER_WIDE_INT != op1val)
5639 {
5640 /* First zero-extend. */
5641 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5642 /* If desired, propagate sign bit. */
5643 if (code == SIGN_EXTRACT
5644 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5645 != 0)
5646 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5647 }
5648
5649 return gen_int_mode (val, int_mode);
5650 }
5651 break;
5652
5653 case IF_THEN_ELSE:
5654 if (CONST_INT_P (op0))
5655 return op0 != const0_rtx ? op1 : op2;
5656
5657 /* Convert c ? a : a into "a". */
5658 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5659 return op1;
5660
5661 /* Convert a != b ? a : b into "a". */
5662 if (GET_CODE (op0) == NE
5663 && ! side_effects_p (op0)
5664 && ! HONOR_NANS (mode)
5665 && ! HONOR_SIGNED_ZEROS (mode)
5666 && ((rtx_equal_p (XEXP (op0, 0), op1)
5667 && rtx_equal_p (XEXP (op0, 1), op2))
5668 || (rtx_equal_p (XEXP (op0, 0), op2)
5669 && rtx_equal_p (XEXP (op0, 1), op1))))
5670 return op1;
5671
5672 /* Convert a == b ? a : b into "b". */
5673 if (GET_CODE (op0) == EQ
5674 && ! side_effects_p (op0)
5675 && ! HONOR_NANS (mode)
5676 && ! HONOR_SIGNED_ZEROS (mode)
5677 && ((rtx_equal_p (XEXP (op0, 0), op1)
5678 && rtx_equal_p (XEXP (op0, 1), op2))
5679 || (rtx_equal_p (XEXP (op0, 0), op2)
5680 && rtx_equal_p (XEXP (op0, 1), op1))))
5681 return op2;
5682
5683 /* Convert (!c) != {0,...,0} ? a : b into
5684 c != {0,...,0} ? b : a for vector modes. */
5685 if (VECTOR_MODE_P (GET_MODE (op1))
5686 && GET_CODE (op0) == NE
5687 && GET_CODE (XEXP (op0, 0)) == NOT
5688 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5689 {
5690 rtx cv = XEXP (op0, 1);
5691 int nunits;
5692 bool ok = true;
5693 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5694 ok = false;
5695 else
5696 for (int i = 0; i < nunits; ++i)
5697 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5698 {
5699 ok = false;
5700 break;
5701 }
5702 if (ok)
5703 {
5704 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5705 XEXP (XEXP (op0, 0), 0),
5706 XEXP (op0, 1));
5707 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5708 return retval;
5709 }
5710 }
5711
5712 /* Convert x == 0 ? N : clz (x) into clz (x) when
5713 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5714 Similarly for ctz (x). */
5715 if (COMPARISON_P (op0) && !side_effects_p (op0)
5716 && XEXP (op0, 1) == const0_rtx)
5717 {
5718 rtx simplified
5719 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5720 op1, op2);
5721 if (simplified)
5722 return simplified;
5723 }
5724
5725 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5726 {
5727 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5728 ? GET_MODE (XEXP (op0, 1))
5729 : GET_MODE (XEXP (op0, 0)));
5730 rtx temp;
5731
5732 	  /* Look for constant values of op1 and op2 (STORE_FLAG_VALUE and zero) that let the IF_THEN_ELSE collapse into the comparison itself.  */
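          /* For instance, on a target with STORE_FLAG_VALUE == 1,
             (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to
             (lt x y), and swapping the two constants collapses to the
             reversed comparison (ge x y) when that reversal is valid.  */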
5733 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5734 {
5735 HOST_WIDE_INT t = INTVAL (op1);
5736 HOST_WIDE_INT f = INTVAL (op2);
5737
5738 if (t == STORE_FLAG_VALUE && f == 0)
5739 code = GET_CODE (op0);
5740 else if (t == 0 && f == STORE_FLAG_VALUE)
5741 {
5742 enum rtx_code tmp;
5743 tmp = reversed_comparison_code (op0, NULL);
5744 if (tmp == UNKNOWN)
5745 break;
5746 code = tmp;
5747 }
5748 else
5749 break;
5750
5751 return simplify_gen_relational (code, mode, cmp_mode,
5752 XEXP (op0, 0), XEXP (op0, 1));
5753 }
5754
5755 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5756 cmp_mode, XEXP (op0, 0),
5757 XEXP (op0, 1));
5758
5759 /* See if any simplifications were possible. */
5760 if (temp)
5761 {
5762 if (CONST_INT_P (temp))
5763 return temp == const0_rtx ? op2 : op1;
5764 else if (temp)
5765 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5766 }
5767 }
5768 break;
5769
5770 case VEC_MERGE:
5771 gcc_assert (GET_MODE (op0) == mode);
5772 gcc_assert (GET_MODE (op1) == mode);
5773 gcc_assert (VECTOR_MODE_P (mode));
5774 trueop2 = avoid_constant_pool_reference (op2);
5775 if (CONST_INT_P (trueop2)
5776 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5777 {
5778 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5779 unsigned HOST_WIDE_INT mask;
5780 if (n_elts == HOST_BITS_PER_WIDE_INT)
5781 mask = -1;
5782 else
5783 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5784
5785 if (!(sel & mask) && !side_effects_p (op0))
5786 return op1;
5787 if ((sel & mask) == mask && !side_effects_p (op1))
5788 return op0;
5789
5790 rtx trueop0 = avoid_constant_pool_reference (op0);
5791 rtx trueop1 = avoid_constant_pool_reference (op1);
5792 if (GET_CODE (trueop0) == CONST_VECTOR
5793 && GET_CODE (trueop1) == CONST_VECTOR)
5794 {
5795 rtvec v = rtvec_alloc (n_elts);
5796 unsigned int i;
5797
5798 for (i = 0; i < n_elts; i++)
5799 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5800 ? CONST_VECTOR_ELT (trueop0, i)
5801 : CONST_VECTOR_ELT (trueop1, i));
5802 return gen_rtx_CONST_VECTOR (mode, v);
5803 }
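          /* E.g. for a four-element vector, a selector of 0b0101 takes
             elements 0 and 2 from trueop0 and elements 1 and 3 from trueop1,
             since bit I of SEL being set selects element I of the first
             operand.  */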
5804
5805 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5806 if no element from a appears in the result. */
5807 if (GET_CODE (op0) == VEC_MERGE)
5808 {
5809 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5810 if (CONST_INT_P (tem))
5811 {
5812 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5813 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5814 return simplify_gen_ternary (code, mode, mode,
5815 XEXP (op0, 1), op1, op2);
5816 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5817 return simplify_gen_ternary (code, mode, mode,
5818 XEXP (op0, 0), op1, op2);
5819 }
5820 }
5821 if (GET_CODE (op1) == VEC_MERGE)
5822 {
5823 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5824 if (CONST_INT_P (tem))
5825 {
5826 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5827 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5828 return simplify_gen_ternary (code, mode, mode,
5829 op0, XEXP (op1, 1), op2);
5830 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5831 return simplify_gen_ternary (code, mode, mode,
5832 op0, XEXP (op1, 0), op2);
5833 }
5834 }
5835
5836 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5837 with a. */
5838 if (GET_CODE (op0) == VEC_DUPLICATE
5839 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5840 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5841 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5842 {
5843 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5844 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5845 {
5846 if (XEXP (XEXP (op0, 0), 0) == op1
5847 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5848 return op1;
5849 }
5850 }
5851 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5852 (const_int N))
5853 with (vec_concat (X) (B)) if N == 1 or
5854 (vec_concat (A) (X)) if N == 2. */
5855 if (GET_CODE (op0) == VEC_DUPLICATE
5856 && GET_CODE (op1) == CONST_VECTOR
5857 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
5858 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5859 && IN_RANGE (sel, 1, 2))
5860 {
5861 rtx newop0 = XEXP (op0, 0);
5862 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5863 if (sel == 2)
5864 std::swap (newop0, newop1);
5865 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5866 }
5867 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5868 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5869 Only applies for vectors of two elements. */
5870 if (GET_CODE (op0) == VEC_DUPLICATE
5871 && GET_CODE (op1) == VEC_CONCAT
5872 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5873 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5874 && IN_RANGE (sel, 1, 2))
5875 {
5876 rtx newop0 = XEXP (op0, 0);
5877 rtx newop1 = XEXP (op1, 2 - sel);
5878 rtx otherop = XEXP (op1, sel - 1);
5879 if (sel == 2)
5880 std::swap (newop0, newop1);
5881 /* Don't want to throw away the other part of the vec_concat if
5882 it has side-effects. */
5883 if (!side_effects_p (otherop))
5884 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5885 }
5886
5887 /* Replace:
5888
5889 (vec_merge:outer (vec_duplicate:outer x:inner)
5890 (subreg:outer y:inner 0)
5891 (const_int N))
5892
5893 with (vec_concat:outer x:inner y:inner) if N == 1,
5894 or (vec_concat:outer y:inner x:inner) if N == 2.
5895 	     We assume that degenerate cases (N == 0 or N == 3), which
5896 represent taking all elements from either input, are handled
5897 elsewhere.
5898
5899 	     Implicitly, this means the subreg must be paradoxical, but
5900 	     such a check is cheap, so make it explicitly anyway.
5901
5902 Only applies for vectors of two elements. */
5903
5904 if ((GET_CODE (op0) == VEC_DUPLICATE
5905 || GET_CODE (op1) == VEC_DUPLICATE)
5906 && GET_MODE (op0) == GET_MODE (op1)
5907 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5908 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5909 && IN_RANGE (sel, 1, 2))
5910 {
5911 rtx newop0 = op0, newop1 = op1;
5912
5913 /* Canonicalize locally such that the VEC_DUPLICATE is always
5914 the first operand. */
5915 if (GET_CODE (newop1) == VEC_DUPLICATE)
5916 {
5917 std::swap (newop0, newop1);
5918 /* If we swap the operand order, we also need to swap
5919 the selector mask. */
5920 sel = sel == 1 ? 2 : 1;
5921 }
5922
5923 if (GET_CODE (newop1) == SUBREG
5924 && paradoxical_subreg_p (newop1)
5925 && subreg_lowpart_p (newop1)
5926 && GET_MODE (SUBREG_REG (newop1))
5927 == GET_MODE (XEXP (newop0, 0)))
5928 {
5929 newop0 = XEXP (newop0, 0);
5930 newop1 = SUBREG_REG (newop1);
5931 if (sel == 2)
5932 std::swap (newop0, newop1);
5933 return simplify_gen_binary (VEC_CONCAT, mode,
5934 newop0, newop1);
5935 }
5936 }
5937
5938 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5939 (const_int n))
5940 with (vec_concat x y) or (vec_concat y x) depending on value
5941 of N. */
5942 if (GET_CODE (op0) == VEC_DUPLICATE
5943 && GET_CODE (op1) == VEC_DUPLICATE
5944 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5945 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5946 && IN_RANGE (sel, 1, 2))
5947 {
5948 rtx newop0 = XEXP (op0, 0);
5949 rtx newop1 = XEXP (op1, 0);
5950 if (sel == 2)
5951 std::swap (newop0, newop1);
5952
5953 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5954 }
5955 }
5956
5957 if (rtx_equal_p (op0, op1)
5958 && !side_effects_p (op2) && !side_effects_p (op1))
5959 return op0;
5960
5961 break;
5962
5963 default:
5964 gcc_unreachable ();
5965 }
5966
5967 return 0;
5968 }
5969
5970 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5971 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5972 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5973
5974 Works by unpacking OP into a collection of 8-bit values
5975 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5976 and then repacking them again for OUTERMODE. */
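/* For example, on a little-endian target,
   simplify_immed_subreg (QImode, (const_int 0x12345678), SImode, 0)
   selects the least significant byte and yields (const_int 0x78), while
   byte 3 would yield (const_int 0x12).  */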
5977
5978 static rtx
5979 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5980 fixed_size_mode innermode, unsigned int byte)
5981 {
5982 enum {
5983 value_bit = 8,
5984 value_mask = (1 << value_bit) - 1
5985 };
5986 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5987 int value_start;
5988 int i;
5989 int elem;
5990
5991 int num_elem;
5992 rtx * elems;
5993 int elem_bitsize;
5994 rtx result_s = NULL;
5995 rtvec result_v = NULL;
5996 enum mode_class outer_class;
5997 scalar_mode outer_submode;
5998 int max_bitsize;
5999
6000 /* Some ports misuse CCmode. */
6001 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6002 return op;
6003
6004 /* We have no way to represent a complex constant at the rtl level. */
6005 if (COMPLEX_MODE_P (outermode))
6006 return NULL_RTX;
6007
6008 /* We support any size mode. */
6009 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6010 GET_MODE_BITSIZE (innermode));
6011
6012 /* Unpack the value. */
6013
6014 if (GET_CODE (op) == CONST_VECTOR)
6015 {
6016 num_elem = GET_MODE_NUNITS (innermode);
6017 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6018 }
6019 else
6020 {
6021 num_elem = 1;
6022 elem_bitsize = max_bitsize;
6023 }
6024 /* If this asserts, it is too complicated; reducing value_bit may help. */
6025 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6026 /* I don't know how to handle endianness of sub-units. */
6027 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6028
6029 for (elem = 0; elem < num_elem; elem++)
6030 {
6031 unsigned char * vp;
6032 rtx el = (GET_CODE (op) == CONST_VECTOR
6033 ? CONST_VECTOR_ELT (op, elem)
6034 : op);
6035
6036 /* Vectors are kept in target memory order. (This is probably
6037 a mistake.) */
6038 {
6039 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6040 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6041 / BITS_PER_UNIT);
6042 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6043 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6044 unsigned bytele = (subword_byte % UNITS_PER_WORD
6045 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6046 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6047 }
6048
6049 switch (GET_CODE (el))
6050 {
6051 case CONST_INT:
6052 for (i = 0;
6053 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6054 i += value_bit)
6055 *vp++ = INTVAL (el) >> i;
6056 /* CONST_INTs are always logically sign-extended. */
6057 for (; i < elem_bitsize; i += value_bit)
6058 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6059 break;
6060
6061 case CONST_WIDE_INT:
6062 {
6063 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6064 unsigned char extend = wi::sign_mask (val);
6065 int prec = wi::get_precision (val);
6066
6067 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6068 *vp++ = wi::extract_uhwi (val, i, value_bit);
6069 for (; i < elem_bitsize; i += value_bit)
6070 *vp++ = extend;
6071 }
6072 break;
6073
6074 case CONST_DOUBLE:
6075 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6076 {
6077 unsigned char extend = 0;
6078 /* If this triggers, someone should have generated a
6079 CONST_INT instead. */
6080 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6081
6082 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6083 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6084 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6085 {
6086 *vp++
6087 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6088 i += value_bit;
6089 }
6090
6091 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6092 extend = -1;
6093 for (; i < elem_bitsize; i += value_bit)
6094 *vp++ = extend;
6095 }
6096 else
6097 {
6098 /* This is big enough for anything on the platform. */
6099 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6100 scalar_float_mode el_mode;
6101
6102 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6103 int bitsize = GET_MODE_BITSIZE (el_mode);
6104
6105 gcc_assert (bitsize <= elem_bitsize);
6106 gcc_assert (bitsize % value_bit == 0);
6107
6108 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6109 GET_MODE (el));
6110
6111 /* real_to_target produces its result in words affected by
6112 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6113 and use WORDS_BIG_ENDIAN instead; see the documentation
6114 of SUBREG in rtl.texi. */
6115 for (i = 0; i < bitsize; i += value_bit)
6116 {
6117 int ibase;
6118 if (WORDS_BIG_ENDIAN)
6119 ibase = bitsize - 1 - i;
6120 else
6121 ibase = i;
6122 *vp++ = tmp[ibase / 32] >> i % 32;
6123 }
6124
6125 /* It shouldn't matter what's done here, so fill it with
6126 zero. */
6127 for (; i < elem_bitsize; i += value_bit)
6128 *vp++ = 0;
6129 }
6130 break;
6131
6132 case CONST_FIXED:
6133 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6134 {
6135 for (i = 0; i < elem_bitsize; i += value_bit)
6136 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6137 }
6138 else
6139 {
6140 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6141 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6142 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6143 i += value_bit)
6144 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6145 >> (i - HOST_BITS_PER_WIDE_INT);
6146 for (; i < elem_bitsize; i += value_bit)
6147 *vp++ = 0;
6148 }
6149 break;
6150
6151 default:
6152 gcc_unreachable ();
6153 }
6154 }
6155
6156 /* Now, pick the right byte to start with. */
6157 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6158 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6159 will already have offset 0. */
6160 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
6161 {
6162 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
6163 - byte);
6164 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6165 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6166 byte = (subword_byte % UNITS_PER_WORD
6167 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6168 }
6169
6170 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6171 so if it's become negative it will instead be very large.) */
6172 gcc_assert (byte < GET_MODE_SIZE (innermode));
6173
6174 /* Convert from bytes to chunks of size value_bit. */
6175 value_start = byte * (BITS_PER_UNIT / value_bit);
6176
6177 /* Re-pack the value. */
6178 num_elem = GET_MODE_NUNITS (outermode);
6179
6180 if (VECTOR_MODE_P (outermode))
6181 {
6182 result_v = rtvec_alloc (num_elem);
6183 elems = &RTVEC_ELT (result_v, 0);
6184 }
6185 else
6186 elems = &result_s;
6187
6188 outer_submode = GET_MODE_INNER (outermode);
6189 outer_class = GET_MODE_CLASS (outer_submode);
6190 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6191
6192 gcc_assert (elem_bitsize % value_bit == 0);
6193 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6194
6195 for (elem = 0; elem < num_elem; elem++)
6196 {
6197 unsigned char *vp;
6198
6199 /* Vectors are stored in target memory order. (This is probably
6200 a mistake.) */
6201 {
6202 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6203 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6204 / BITS_PER_UNIT);
6205 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6206 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6207 unsigned bytele = (subword_byte % UNITS_PER_WORD
6208 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6209 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6210 }
6211
6212 switch (outer_class)
6213 {
6214 case MODE_INT:
6215 case MODE_PARTIAL_INT:
6216 {
6217 int u;
6218 int base = 0;
6219 int units
6220 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6221 / HOST_BITS_PER_WIDE_INT;
6222 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6223 wide_int r;
6224
6225 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6226 return NULL_RTX;
6227 for (u = 0; u < units; u++)
6228 {
6229 unsigned HOST_WIDE_INT buf = 0;
6230 for (i = 0;
6231 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6232 i += value_bit)
6233 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6234
6235 tmp[u] = buf;
6236 base += HOST_BITS_PER_WIDE_INT;
6237 }
6238 r = wide_int::from_array (tmp, units,
6239 GET_MODE_PRECISION (outer_submode));
6240 #if TARGET_SUPPORTS_WIDE_INT == 0
6241 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6242 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6243 return NULL_RTX;
6244 #endif
6245 elems[elem] = immed_wide_int_const (r, outer_submode);
6246 }
6247 break;
6248
6249 case MODE_FLOAT:
6250 case MODE_DECIMAL_FLOAT:
6251 {
6252 REAL_VALUE_TYPE r;
6253 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6254
6255 /* real_from_target wants its input in words affected by
6256 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6257 and use WORDS_BIG_ENDIAN instead; see the documentation
6258 of SUBREG in rtl.texi. */
6259 for (i = 0; i < elem_bitsize; i += value_bit)
6260 {
6261 int ibase;
6262 if (WORDS_BIG_ENDIAN)
6263 ibase = elem_bitsize - 1 - i;
6264 else
6265 ibase = i;
6266 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6267 }
6268
6269 real_from_target (&r, tmp, outer_submode);
6270 elems[elem] = const_double_from_real_value (r, outer_submode);
6271 }
6272 break;
6273
6274 case MODE_FRACT:
6275 case MODE_UFRACT:
6276 case MODE_ACCUM:
6277 case MODE_UACCUM:
6278 {
6279 FIXED_VALUE_TYPE f;
6280 f.data.low = 0;
6281 f.data.high = 0;
6282 f.mode = outer_submode;
6283
6284 for (i = 0;
6285 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6286 i += value_bit)
6287 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6288 for (; i < elem_bitsize; i += value_bit)
6289 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6290 << (i - HOST_BITS_PER_WIDE_INT));
6291
6292 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6293 }
6294 break;
6295
6296 default:
6297 gcc_unreachable ();
6298 }
6299 }
6300 if (VECTOR_MODE_P (outermode))
6301 return gen_rtx_CONST_VECTOR (outermode, result_v);
6302 else
6303 return result_s;
6304 }
6305
6306 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6307 Return 0 if no simplifications are possible. */
6308 rtx
6309 simplify_subreg (machine_mode outermode, rtx op,
6310 machine_mode innermode, poly_uint64 byte)
6311 {
6312 /* Little bit of sanity checking. */
6313 gcc_assert (innermode != VOIDmode);
6314 gcc_assert (outermode != VOIDmode);
6315 gcc_assert (innermode != BLKmode);
6316 gcc_assert (outermode != BLKmode);
6317
6318 gcc_assert (GET_MODE (op) == innermode
6319 || GET_MODE (op) == VOIDmode);
6320
6321 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6322 if (!multiple_p (byte, outersize))
6323 return NULL_RTX;
6324
6325 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6326 if (maybe_ge (byte, innersize))
6327 return NULL_RTX;
6328
6329 if (outermode == innermode && known_eq (byte, 0U))
6330 return op;
6331
6332 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6333 {
6334 rtx elt;
6335
6336 if (VECTOR_MODE_P (outermode)
6337 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6338 && vec_duplicate_p (op, &elt))
6339 return gen_vec_duplicate (outermode, elt);
6340
6341 if (outermode == GET_MODE_INNER (innermode)
6342 && vec_duplicate_p (op, &elt))
6343 return elt;
6344 }
6345
6346 if (CONST_SCALAR_INT_P (op)
6347 || CONST_DOUBLE_AS_FLOAT_P (op)
6348 || CONST_FIXED_P (op)
6349 || GET_CODE (op) == CONST_VECTOR)
6350 {
6351 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6352 the result from bytes, so it only works if the sizes of the modes
6353 	 and the value of the offset are known at compile time.  Cases
6354 	 that apply to general modes and offsets should be handled here
6355 before calling simplify_immed_subreg. */
6356 fixed_size_mode fs_outermode, fs_innermode;
6357 unsigned HOST_WIDE_INT cbyte;
6358 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6359 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6360 && byte.is_constant (&cbyte))
6361 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte);
6362
6363 return NULL_RTX;
6364 }
6365
6366 /* Changing mode twice with SUBREG => just change it once,
6367      or not at all if changing back to op's starting mode.  */
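  /* E.g. (subreg:QI (subreg:HI (reg:SI x) 0) 0) can become
     (subreg:QI (reg:SI x) 0); this example assumes little-endian byte
     numbering, so that offset 0 is the low part in both cases.  */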
6368 if (GET_CODE (op) == SUBREG)
6369 {
6370 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6371 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6372 rtx newx;
6373
6374 if (outermode == innermostmode
6375 && known_eq (byte, 0U)
6376 && known_eq (SUBREG_BYTE (op), 0))
6377 return SUBREG_REG (op);
6378
6379 /* Work out the memory offset of the final OUTERMODE value relative
6380 to the inner value of OP. */
6381 poly_int64 mem_offset = subreg_memory_offset (outermode,
6382 innermode, byte);
6383 poly_int64 op_mem_offset = subreg_memory_offset (op);
6384 poly_int64 final_offset = mem_offset + op_mem_offset;
6385
6386 /* See whether resulting subreg will be paradoxical. */
6387 if (!paradoxical_subreg_p (outermode, innermostmode))
6388 {
6389 /* Bail out in case resulting subreg would be incorrect. */
6390 if (maybe_lt (final_offset, 0)
6391 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6392 || !multiple_p (final_offset, outersize))
6393 return NULL_RTX;
6394 }
6395 else
6396 {
6397 poly_int64 required_offset = subreg_memory_offset (outermode,
6398 innermostmode, 0);
6399 if (maybe_ne (final_offset, required_offset))
6400 return NULL_RTX;
6401 /* Paradoxical subregs always have byte offset 0. */
6402 final_offset = 0;
6403 }
6404
6405 /* Recurse for further possible simplifications. */
6406 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6407 final_offset);
6408 if (newx)
6409 return newx;
6410 if (validate_subreg (outermode, innermostmode,
6411 SUBREG_REG (op), final_offset))
6412 {
6413 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6414 if (SUBREG_PROMOTED_VAR_P (op)
6415 && SUBREG_PROMOTED_SIGN (op) >= 0
6416 && GET_MODE_CLASS (outermode) == MODE_INT
6417 && known_ge (outersize, innersize)
6418 && known_le (outersize, innermostsize)
6419 && subreg_lowpart_p (newx))
6420 {
6421 SUBREG_PROMOTED_VAR_P (newx) = 1;
6422 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6423 }
6424 return newx;
6425 }
6426 return NULL_RTX;
6427 }
6428
6429 /* SUBREG of a hard register => just change the register number
6430 and/or mode. If the hard register is not valid in that mode,
6431 suppress this simplification. If the hard register is the stack,
6432 frame, or argument pointer, leave this as a SUBREG. */
6433
6434 if (REG_P (op) && HARD_REGISTER_P (op))
6435 {
6436 unsigned int regno, final_regno;
6437
6438 regno = REGNO (op);
6439 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6440 if (HARD_REGISTER_NUM_P (final_regno))
6441 {
6442 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6443 subreg_memory_offset (outermode,
6444 innermode, byte));
6445
6446 /* Propagate original regno. We don't have any way to specify
6447 the offset inside original regno, so do so only for lowpart.
6448 	     The information is used only by alias analysis, which cannot
6449 	     grok partial registers anyway.  */
6450
6451 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6452 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6453 return x;
6454 }
6455 }
6456
6457 /* If we have a SUBREG of a register that we are replacing and we are
6458 replacing it with a MEM, make a new MEM and try replacing the
6459 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6460 or if we would be widening it. */
6461
6462 if (MEM_P (op)
6463 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6464 /* Allow splitting of volatile memory references in case we don't
6465 	 have an instruction to move the whole thing.  */
6466 && (! MEM_VOLATILE_P (op)
6467 || ! have_insn_for (SET, innermode))
6468 && known_le (outersize, innersize))
6469 return adjust_address_nv (op, outermode, byte);
6470
6471 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6472 of two parts. */
6473 if (GET_CODE (op) == CONCAT
6474 || GET_CODE (op) == VEC_CONCAT)
6475 {
6476 poly_uint64 final_offset;
6477 rtx part, res;
6478
6479 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6480 if (part_mode == VOIDmode)
6481 part_mode = GET_MODE_INNER (GET_MODE (op));
6482 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6483 if (known_lt (byte, part_size))
6484 {
6485 part = XEXP (op, 0);
6486 final_offset = byte;
6487 }
6488 else if (known_ge (byte, part_size))
6489 {
6490 part = XEXP (op, 1);
6491 final_offset = byte - part_size;
6492 }
6493 else
6494 return NULL_RTX;
6495
6496 if (maybe_gt (final_offset + outersize, part_size))
6497 return NULL_RTX;
6498
6499 part_mode = GET_MODE (part);
6500 if (part_mode == VOIDmode)
6501 part_mode = GET_MODE_INNER (GET_MODE (op));
6502 res = simplify_subreg (outermode, part, part_mode, final_offset);
6503 if (res)
6504 return res;
6505 if (validate_subreg (outermode, part_mode, part, final_offset))
6506 return gen_rtx_SUBREG (outermode, part, final_offset);
6507 return NULL_RTX;
6508 }
6509
6510 /* A SUBREG resulting from a zero extension may fold to zero if
6511      it extracts higher bits than the ZERO_EXTEND's source provides.  */
6512 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6513 {
6514 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6515 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6516 return CONST0_RTX (outermode);
6517 }
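  /* For instance, on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads bits 32..63, all of
     which lie above the 32-bit source, so it folds to (const_int 0).  */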
6518
6519 scalar_int_mode int_outermode, int_innermode;
6520 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6521 && is_a <scalar_int_mode> (innermode, &int_innermode)
6522 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6523 {
6524 /* Handle polynomial integers. The upper bits of a paradoxical
6525 subreg are undefined, so this is safe regardless of whether
6526 we're truncating or extending. */
6527 if (CONST_POLY_INT_P (op))
6528 {
6529 poly_wide_int val
6530 = poly_wide_int::from (const_poly_int_value (op),
6531 GET_MODE_PRECISION (int_outermode),
6532 SIGNED);
6533 return immed_wide_int_const (val, int_outermode);
6534 }
6535
6536 if (GET_MODE_PRECISION (int_outermode)
6537 < GET_MODE_PRECISION (int_innermode))
6538 {
6539 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6540 if (tem)
6541 return tem;
6542 }
6543 }
6544
6545 return NULL_RTX;
6546 }
6547
6548 /* Make a SUBREG operation or equivalent if it folds. */
6549
6550 rtx
6551 simplify_gen_subreg (machine_mode outermode, rtx op,
6552 machine_mode innermode, poly_uint64 byte)
6553 {
6554 rtx newx;
6555
6556 newx = simplify_subreg (outermode, op, innermode, byte);
6557 if (newx)
6558 return newx;
6559
6560 if (GET_CODE (op) == SUBREG
6561 || GET_CODE (op) == CONCAT
6562 || GET_MODE (op) == VOIDmode)
6563 return NULL_RTX;
6564
6565 if (validate_subreg (outermode, innermode, op, byte))
6566 return gen_rtx_SUBREG (outermode, op, byte);
6567
6568 return NULL_RTX;
6569 }
6570
6571 /* Generate a SUBREG that extracts the least significant part of EXPR
6572    (in mode INNER_MODE) as a value of mode OUTER_MODE.  */
6573
6574 rtx
6575 lowpart_subreg (machine_mode outer_mode, rtx expr,
6576 machine_mode inner_mode)
6577 {
6578 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6579 subreg_lowpart_offset (outer_mode, inner_mode));
6580 }
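/* E.g. lowpart_subreg (QImode, x, SImode) asks for the least significant
   byte of X: offset 0 on a little-endian target and typically offset 3 on a
   big-endian one, as computed by subreg_lowpart_offset.  */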
6581
6582 /* Simplify X, an rtx expression.
6583
6584 Return the simplified expression or NULL if no simplifications
6585 were possible.
6586
6587 This is the preferred entry point into the simplification routines;
6588 however, we still allow passes to call the more specific routines.
6589
6590 Right now GCC has three (yes, three) major bodies of RTL simplification
6591 code that need to be unified.
6592
6593 1. fold_rtx in cse.c. This code uses various CSE specific
6594 information to aid in RTL simplification.
6595
6596 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6597 it uses combine specific information to aid in RTL
6598 simplification.
6599
6600 3. The routines in this file.
6601
6602
6603 Long term we want to only have one body of simplification code; to
6604 get to that state I recommend the following steps:
6605
6606    1. Pore over fold_rtx & simplify_rtx and move any simplifications
6607       which do not depend on pass-specific state into these routines.
6608
6609 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6610 use this routine whenever possible.
6611
6612 3. Allow for pass dependent state to be provided to these
6613 routines and add simplifications based on the pass dependent
6614 state. Remove code from cse.c & combine.c that becomes
6615 redundant/dead.
6616
6617 It will take time, but ultimately the compiler will be easier to
6618    maintain and improve.  It's totally silly that when we add a
6619    simplification it needs to be added to 4 places (3 for RTL
6620    simplification and 1 for tree simplification).  */
6621
6622 rtx
6623 simplify_rtx (const_rtx x)
6624 {
6625 const enum rtx_code code = GET_CODE (x);
6626 const machine_mode mode = GET_MODE (x);
6627
6628 switch (GET_RTX_CLASS (code))
6629 {
6630 case RTX_UNARY:
6631 return simplify_unary_operation (code, mode,
6632 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6633 case RTX_COMM_ARITH:
6634 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6635 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6636
6637 /* Fall through. */
6638
6639 case RTX_BIN_ARITH:
6640 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6641
6642 case RTX_TERNARY:
6643 case RTX_BITFIELD_OPS:
6644 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6645 XEXP (x, 0), XEXP (x, 1),
6646 XEXP (x, 2));
6647
6648 case RTX_COMPARE:
6649 case RTX_COMM_COMPARE:
6650 return simplify_relational_operation (code, mode,
6651 ((GET_MODE (XEXP (x, 0))
6652 != VOIDmode)
6653 ? GET_MODE (XEXP (x, 0))
6654 : GET_MODE (XEXP (x, 1))),
6655 XEXP (x, 0),
6656 XEXP (x, 1));
6657
6658 case RTX_EXTRA:
6659 if (code == SUBREG)
6660 return simplify_subreg (mode, SUBREG_REG (x),
6661 GET_MODE (SUBREG_REG (x)),
6662 SUBREG_BYTE (x));
6663 break;
6664
6665 case RTX_OBJ:
6666 if (code == LO_SUM)
6667 {
6668 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6669 if (GET_CODE (XEXP (x, 0)) == HIGH
6670 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6671 return XEXP (x, 1);
6672 }
6673 break;
6674
6675 default:
6676 break;
6677 }
6678 return NULL;
6679 }
6680
6681 #if CHECKING_P
6682
6683 namespace selftest {
6684
6685 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6686
6687 static rtx
6688 make_test_reg (machine_mode mode)
6689 {
6690 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6691
6692 return gen_rtx_REG (mode, test_reg_num++);
6693 }
6694
6695 /* Test vector simplifications involving VEC_DUPLICATE in which the
6696 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6697 register that holds one element of MODE. */
6698
6699 static void
6700 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6701 {
6702 scalar_mode inner_mode = GET_MODE_INNER (mode);
6703 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6704 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6705 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6706 {
6707 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6708 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6709 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6710 ASSERT_RTX_EQ (duplicate,
6711 simplify_unary_operation (NOT, mode,
6712 duplicate_not, mode));
6713
6714 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6715 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6716 ASSERT_RTX_EQ (duplicate,
6717 simplify_unary_operation (NEG, mode,
6718 duplicate_neg, mode));
6719
6720 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6721 ASSERT_RTX_EQ (duplicate,
6722 simplify_binary_operation (PLUS, mode, duplicate,
6723 CONST0_RTX (mode)));
6724
6725 ASSERT_RTX_EQ (duplicate,
6726 simplify_binary_operation (MINUS, mode, duplicate,
6727 CONST0_RTX (mode)));
6728
6729 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6730 simplify_binary_operation (MINUS, mode, duplicate,
6731 duplicate));
6732 }
6733
6734 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6735 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6736 ASSERT_RTX_PTR_EQ (scalar_reg,
6737 simplify_binary_operation (VEC_SELECT, inner_mode,
6738 duplicate, zero_par));
6739
6740 /* And again with the final element. */
6741 unsigned HOST_WIDE_INT const_nunits;
6742 if (nunits.is_constant (&const_nunits))
6743 {
6744 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6745 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6746 ASSERT_RTX_PTR_EQ (scalar_reg,
6747 simplify_binary_operation (VEC_SELECT, inner_mode,
6748 duplicate, last_par));
6749 }
6750
6751 /* Test a scalar subreg of a VEC_DUPLICATE. */
6752 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6753 ASSERT_RTX_EQ (scalar_reg,
6754 simplify_gen_subreg (inner_mode, duplicate,
6755 mode, offset));
6756
6757 machine_mode narrower_mode;
6758 if (maybe_ne (nunits, 2U)
6759 && multiple_p (nunits, 2)
6760 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6761 && VECTOR_MODE_P (narrower_mode))
6762 {
6763 /* Test VEC_SELECT of a vector. */
6764 rtx vec_par
6765 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6766 rtx narrower_duplicate
6767 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6768 ASSERT_RTX_EQ (narrower_duplicate,
6769 simplify_binary_operation (VEC_SELECT, narrower_mode,
6770 duplicate, vec_par));
6771
6772 /* Test a vector subreg of a VEC_DUPLICATE. */
6773 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6774 ASSERT_RTX_EQ (narrower_duplicate,
6775 simplify_gen_subreg (narrower_mode, duplicate,
6776 mode, offset));
6777 }
6778 }
6779
6780 /* Test vector simplifications involving VEC_SERIES in which the
6781 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6782 register that holds one element of MODE. */
6783
6784 static void
6785 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6786 {
6787 /* Test unary cases with VEC_SERIES arguments. */
6788 scalar_mode inner_mode = GET_MODE_INNER (mode);
6789 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6790 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6791 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6792 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6793 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6794 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6795 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6796 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6797 neg_scalar_reg);
6798 ASSERT_RTX_EQ (series_0_r,
6799 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6800 ASSERT_RTX_EQ (series_r_m1,
6801 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6802 ASSERT_RTX_EQ (series_r_r,
6803 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6804
6805 /* Test that a VEC_SERIES with a zero step is simplified away. */
6806 ASSERT_RTX_EQ (duplicate,
6807 simplify_binary_operation (VEC_SERIES, mode,
6808 scalar_reg, const0_rtx));
6809
6810 /* Test PLUS and MINUS with VEC_SERIES. */
6811 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6812 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6813 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6814 ASSERT_RTX_EQ (series_r_r,
6815 simplify_binary_operation (PLUS, mode, series_0_r,
6816 duplicate));
6817 ASSERT_RTX_EQ (series_r_1,
6818 simplify_binary_operation (PLUS, mode, duplicate,
6819 series_0_1));
6820 ASSERT_RTX_EQ (series_r_m1,
6821 simplify_binary_operation (PLUS, mode, duplicate,
6822 series_0_m1));
6823 ASSERT_RTX_EQ (series_0_r,
6824 simplify_binary_operation (MINUS, mode, series_r_r,
6825 duplicate));
6826 ASSERT_RTX_EQ (series_r_m1,
6827 simplify_binary_operation (MINUS, mode, duplicate,
6828 series_0_1));
6829 ASSERT_RTX_EQ (series_r_1,
6830 simplify_binary_operation (MINUS, mode, duplicate,
6831 series_0_m1));
6832 ASSERT_RTX_EQ (series_0_m1,
6833 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6834 constm1_rtx));
6835 }
6836
6837 /* Verify some simplifications involving vectors. */
6838
6839 static void
6840 test_vector_ops ()
6841 {
6842 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6843 {
6844 machine_mode mode = (machine_mode) i;
6845 if (VECTOR_MODE_P (mode))
6846 {
6847 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6848 test_vector_ops_duplicate (mode, scalar_reg);
6849 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6850 && maybe_gt (GET_MODE_NUNITS (mode), 2))
6851 test_vector_ops_series (mode, scalar_reg);
6852 }
6853 }
6854 }
6855
6856 template<unsigned int N>
6857 struct simplify_const_poly_int_tests
6858 {
6859 static void run ();
6860 };
6861
6862 template<>
6863 struct simplify_const_poly_int_tests<1>
6864 {
6865 static void run () {}
6866 };
6867
6868 /* Test various CONST_POLY_INT properties. */
6869
6870 template<unsigned int N>
6871 void
6872 simplify_const_poly_int_tests<N>::run ()
6873 {
6874 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6875 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6876 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6877 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6878 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6879 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6880 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6881 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6882 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6883 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6884 rtx two = GEN_INT (2);
6885 rtx six = GEN_INT (6);
6886 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
6887
6888 /* These tests only try limited operation combinations. Fuller arithmetic
6889 testing is done directly on poly_ints. */
6890 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6891 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6892 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6893 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6894 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6895 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6896 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6897 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6898 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6899 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6900 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6901 }
6902
6903 /* Run all of the selftests within this file. */
6904
6905 void
6906 simplify_rtx_c_tests ()
6907 {
6908 test_vector_ops ();
6909 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6910 }
6911
6912 } // namespace selftest
6913
6914 #endif /* CHECKING_P */