Generalise VEC_DUPLICATE folding for variable-length vectors
gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
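/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (-1) is
   HOST_WIDE_INT_M1 and HWI_SIGN_EXTEND (1) is HOST_WIDE_INT_0, so the
   pair (low, HWI_SIGN_EXTEND (low)) is the double-width sign extension
   of LOW.  */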
48
49 static rtx neg_const_int (machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
52 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
53 rtx, rtx);
54 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
55 machine_mode, rtx, rtx);
56 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
57 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
58 rtx, rtx, rtx, rtx);
59 \f
60 /* Negate a CONST_INT rtx. */
61 static rtx
62 neg_const_int (machine_mode mode, const_rtx i)
63 {
64 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65
66 if (!HWI_COMPUTABLE_MODE_P (mode)
67 && val == UINTVAL (i))
68 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
69 mode);
70 return gen_int_mode (val, mode);
71 }
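/* For example, neg_const_int (QImode, (const_int -128)) returns
   (const_int -128), since 128 wraps to -128 in QImode.  The guard
   above handles modes wider than a host word: when negating I gives
   back the same host-word pattern, the exact result will not fit in
   a sign-extended CONST_INT, so we defer to the wide-int path in
   simplify_const_unary_operation.  */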
72
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
75
76 bool
77 mode_signbit_p (machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
81 scalar_int_mode int_mode;
82
83 if (!is_int_mode (mode, &int_mode))
84 return false;
85
86 width = GET_MODE_PRECISION (int_mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 #if TARGET_SUPPORTS_WIDE_INT
94 else if (CONST_WIDE_INT_P (x))
95 {
96 unsigned int i;
97 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
98 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
99 return false;
100 for (i = 0; i < elts - 1; i++)
101 if (CONST_WIDE_INT_ELT (x, i) != 0)
102 return false;
103 val = CONST_WIDE_INT_ELT (x, elts - 1);
104 width %= HOST_BITS_PER_WIDE_INT;
105 if (width == 0)
106 width = HOST_BITS_PER_WIDE_INT;
107 }
108 #else
109 else if (width <= HOST_BITS_PER_DOUBLE_INT
110 && CONST_DOUBLE_AS_INT_P (x)
111 && CONST_DOUBLE_LOW (x) == 0)
112 {
113 val = CONST_DOUBLE_HIGH (x);
114 width -= HOST_BITS_PER_WIDE_INT;
115 }
116 #endif
117 else
118 /* X is not an integer constant. */
119 return false;
120
121 if (width < HOST_BITS_PER_WIDE_INT)
122 val &= (HOST_WIDE_INT_1U << width) - 1;
123 return val == (HOST_WIDE_INT_1U << (width - 1));
124 }
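/* For example, with a 64-bit HOST_WIDE_INT, the canonical SImode
   sign-bit constant is (const_int -2147483648); after masking with
   the 32-bit mode mask its value is 0x80000000, which equals 1 << 31,
   so mode_signbit_p returns true for it and false for every other
   SImode constant.  */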
125
126 /* Test whether VAL is equal to the most significant bit of mode MODE
127 (after masking with the mode mask of MODE). Returns false if the
128 precision of MODE is too large to handle. */
129
130 bool
131 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 {
133 unsigned int width;
134 scalar_int_mode int_mode;
135
136 if (!is_int_mode (mode, &int_mode))
137 return false;
138
139 width = GET_MODE_PRECISION (int_mode);
140 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
141 return false;
142
143 val &= GET_MODE_MASK (int_mode);
144 return val == (HOST_WIDE_INT_1U << (width - 1));
145 }
146
147 /* Test whether the most significant bit of mode MODE is set in VAL.
148 Returns false if the precision of MODE is too large to handle. */
149 bool
150 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 {
152 unsigned int width;
153
154 scalar_int_mode int_mode;
155 if (!is_int_mode (mode, &int_mode))
156 return false;
157
158 width = GET_MODE_PRECISION (int_mode);
159 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160 return false;
161
162 val &= HOST_WIDE_INT_1U << (width - 1);
163 return val != 0;
164 }
165
166 /* Test whether the most significant bit of mode MODE is clear in VAL.
167 Returns false if the precision of MODE is too large to handle. */
168 bool
169 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 {
171 unsigned int width;
172
173 scalar_int_mode int_mode;
174 if (!is_int_mode (mode, &int_mode))
175 return false;
176
177 width = GET_MODE_PRECISION (int_mode);
178 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
179 return false;
180
181 val &= HOST_WIDE_INT_1U << (width - 1);
182 return val == 0;
183 }
184 \f
185 /* Make a binary operation by properly ordering the operands and
186 seeing if the expression folds. */
187
188 rtx
189 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
190 rtx op1)
191 {
192 rtx tem;
193
194 /* If this simplifies, do it. */
195 tem = simplify_binary_operation (code, mode, op0, op1);
196 if (tem)
197 return tem;
198
199 /* Put complex operands first and constants second if commutative. */
200 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
201 && swap_commutative_operands_p (op0, op1))
202 std::swap (op0, op1);
203
204 return gen_rtx_fmt_ee (code, mode, op0, op1);
205 }
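/* For example, simplify_gen_binary (PLUS, SImode, (const_int 1), R)
   first tries constant folding; failing that, the commutative swap
   puts the constant second, yielding the canonical
   (plus:SI R (const_int 1)) rather than (plus:SI (const_int 1) R).  */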
206 \f
207 /* If X is a MEM referencing the constant pool, return the real value.
208 Otherwise return X. */
209 rtx
210 avoid_constant_pool_reference (rtx x)
211 {
212 rtx c, tmp, addr;
213 machine_mode cmode;
214 poly_int64 offset = 0;
215
216 switch (GET_CODE (x))
217 {
218 case MEM:
219 break;
220
221 case FLOAT_EXTEND:
222 /* Handle float extensions of constant pool references. */
223 tmp = XEXP (x, 0);
224 c = avoid_constant_pool_reference (tmp);
225 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
226 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
227 GET_MODE (x));
228 return x;
229
230 default:
231 return x;
232 }
233
234 if (GET_MODE (x) == BLKmode)
235 return x;
236
237 addr = XEXP (x, 0);
238
239 /* Call target hook to avoid the effects of -fpic etc.... */
240 addr = targetm.delegitimize_address (addr);
241
242 /* Split the address into a base and integer offset. */
243 addr = strip_offset (addr, &offset);
244
245 if (GET_CODE (addr) == LO_SUM)
246 addr = XEXP (addr, 1);
247
248 /* If this is a constant pool reference, we can turn it into its
249 constant and hope that simplifications happen. */
250 if (GET_CODE (addr) == SYMBOL_REF
251 && CONSTANT_POOL_ADDRESS_P (addr))
252 {
253 c = get_pool_constant (addr);
254 cmode = get_pool_mode (addr);
255
256 /* If we're accessing the constant in a different mode than it was
257 originally stored, attempt to fix that up via subreg simplifications.
258 If that fails we have no choice but to return the original memory. */
259 if (known_eq (offset, 0) && cmode == GET_MODE (x))
260 return c;
261 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
262 {
263 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
264 if (tem && CONSTANT_P (tem))
265 return tem;
266 }
267 }
268
269 return x;
270 }
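/* For example, if X is (mem:DF (symbol_ref C)) where C addresses a
   constant-pool entry holding 1.0, the result is the
   (const_double:DF 1.0) itself, so later folding sees the value
   instead of the memory reference.  */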
271 \f
272 /* Simplify a MEM based on its attributes. This is the default
273 delegitimize_address target hook, and it's recommended that every
274 overrider call it. */
275
276 rtx
277 delegitimize_mem_from_attrs (rtx x)
278 {
279 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
280 use their base addresses as equivalent. */
281 if (MEM_P (x)
282 && MEM_EXPR (x)
283 && MEM_OFFSET_KNOWN_P (x))
284 {
285 tree decl = MEM_EXPR (x);
286 machine_mode mode = GET_MODE (x);
287 poly_int64 offset = 0;
288
289 switch (TREE_CODE (decl))
290 {
291 default:
292 decl = NULL;
293 break;
294
295 case VAR_DECL:
296 break;
297
298 case ARRAY_REF:
299 case ARRAY_RANGE_REF:
300 case COMPONENT_REF:
301 case BIT_FIELD_REF:
302 case REALPART_EXPR:
303 case IMAGPART_EXPR:
304 case VIEW_CONVERT_EXPR:
305 {
306 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
307 tree toffset;
308 int unsignedp, reversep, volatilep = 0;
309
310 decl
311 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
312 &unsignedp, &reversep, &volatilep);
313 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
314 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
315 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
316 decl = NULL;
317 else
318 offset += bytepos + toffset_val;
319 break;
320 }
321 }
322
323 if (decl
324 && mode == GET_MODE (x)
325 && VAR_P (decl)
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
330 {
331 rtx newx;
332
333 offset += MEM_OFFSET (x);
334
335 newx = DECL_RTL (decl);
336
337 if (MEM_P (newx))
338 {
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
340 poly_int64 n_offset, o_offset;
341
342 /* Avoid creating a new MEM needlessly if we already had
343 the same address. We do if there's no OFFSET and the
344 old address X is identical to NEWX, or if X is of the
345 form (plus NEWX OFFSET), or the NEWX is of the form
346 (plus Y (const_int Z)) and X is that with the offset
347 added: (plus Y (const_int Z+OFFSET)). */
348 n = strip_offset (n, &n_offset);
349 o = strip_offset (o, &o_offset);
350 if (!(known_eq (o_offset, n_offset + offset)
351 && rtx_equal_p (o, n)))
352 x = adjust_address_nv (newx, mode, offset);
353 }
354 else if (GET_MODE (x) == GET_MODE (newx)
355 && known_eq (offset, 0))
356 x = newx;
357 }
358 }
359
360 return x;
361 }
362 \f
363 /* Make a unary operation by first seeing if it folds and otherwise making
364 the specified operation. */
365
366 rtx
367 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
368 machine_mode op_mode)
369 {
370 rtx tem;
371
372 /* If this simplifies, use it. */
373 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
374 return tem;
375
376 return gen_rtx_fmt_e (code, mode, op);
377 }
378
379 /* Likewise for ternary operations. */
380
381 rtx
382 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
383 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
384 {
385 rtx tem;
386
387 /* If this simplifies, use it. */
388 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
389 op0, op1, op2)) != 0)
390 return tem;
391
392 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
393 }
394
395 /* Likewise, for relational operations.
396 CMP_MODE specifies the mode the comparison is done in. */
397
398 rtx
399 simplify_gen_relational (enum rtx_code code, machine_mode mode,
400 machine_mode cmp_mode, rtx op0, rtx op1)
401 {
402 rtx tem;
403
404 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
405 op0, op1)) != 0)
406 return tem;
407
408 return gen_rtx_fmt_ee (code, mode, op0, op1);
409 }
410 \f
411 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
412 and simplify the result. If FN is non-NULL, call this callback on each
413 X; if it returns non-NULL, replace X with its return value and simplify the
414 result. */
415
416 rtx
417 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
418 rtx (*fn) (rtx, const_rtx, void *), void *data)
419 {
420 enum rtx_code code = GET_CODE (x);
421 machine_mode mode = GET_MODE (x);
422 machine_mode op_mode;
423 const char *fmt;
424 rtx op0, op1, op2, newx, op;
425 rtvec vec, newvec;
426 int i, j;
427
428 if (__builtin_expect (fn != NULL, 0))
429 {
430 newx = fn (x, old_rtx, data);
431 if (newx)
432 return newx;
433 }
434 else if (rtx_equal_p (x, old_rtx))
435 return copy_rtx ((rtx) data);
436
437 switch (GET_RTX_CLASS (code))
438 {
439 case RTX_UNARY:
440 op0 = XEXP (x, 0);
441 op_mode = GET_MODE (op0);
442 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
443 if (op0 == XEXP (x, 0))
444 return x;
445 return simplify_gen_unary (code, mode, op0, op_mode);
446
447 case RTX_BIN_ARITH:
448 case RTX_COMM_ARITH:
449 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
450 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return simplify_gen_binary (code, mode, op0, op1);
454
455 case RTX_COMPARE:
456 case RTX_COMM_COMPARE:
457 op0 = XEXP (x, 0);
458 op1 = XEXP (x, 1);
459 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
460 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_relational (code, mode, op_mode, op0, op1);
465
466 case RTX_TERNARY:
467 case RTX_BITFIELD_OPS:
468 op0 = XEXP (x, 0);
469 op_mode = GET_MODE (op0);
470 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
474 return x;
475 if (op_mode == VOIDmode)
476 op_mode = GET_MODE (op0);
477 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
478
479 case RTX_EXTRA:
480 if (code == SUBREG)
481 {
482 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
483 if (op0 == SUBREG_REG (x))
484 return x;
485 op0 = simplify_gen_subreg (GET_MODE (x), op0,
486 GET_MODE (SUBREG_REG (x)),
487 SUBREG_BYTE (x));
488 return op0 ? op0 : x;
489 }
490 break;
491
492 case RTX_OBJ:
493 if (code == MEM)
494 {
495 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
496 if (op0 == XEXP (x, 0))
497 return x;
498 return replace_equiv_address_nv (x, op0);
499 }
500 else if (code == LO_SUM)
501 {
502 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
503 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
504
505 /* (lo_sum (high x) y) -> y where x and y have the same base. */
506 if (GET_CODE (op0) == HIGH)
507 {
508 rtx base0, base1, offset0, offset1;
509 split_const (XEXP (op0, 0), &base0, &offset0);
510 split_const (op1, &base1, &offset1);
511 if (rtx_equal_p (base0, base1))
512 return op1;
513 }
514
515 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
516 return x;
517 return gen_rtx_LO_SUM (mode, op0, op1);
518 }
519 break;
520
521 default:
522 break;
523 }
524
525 newx = x;
526 fmt = GET_RTX_FORMAT (code);
527 for (i = 0; fmt[i]; i++)
528 switch (fmt[i])
529 {
530 case 'E':
531 vec = XVEC (x, i);
532 newvec = XVEC (newx, i);
533 for (j = 0; j < GET_NUM_ELEM (vec); j++)
534 {
535 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
536 old_rtx, fn, data);
537 if (op != RTVEC_ELT (vec, j))
538 {
539 if (newvec == vec)
540 {
541 newvec = shallow_copy_rtvec (vec);
542 if (x == newx)
543 newx = shallow_copy_rtx (x);
544 XVEC (newx, i) = newvec;
545 }
546 RTVEC_ELT (newvec, j) = op;
547 }
548 }
549 break;
550
551 case 'e':
552 if (XEXP (x, i))
553 {
554 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
555 if (op != XEXP (x, i))
556 {
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XEXP (newx, i) = op;
560 }
561 }
562 break;
563 }
564 return newx;
565 }
566
567 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
568 resulting RTX. Return a new RTX which is as simplified as possible. */
569
570 rtx
571 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
572 {
573 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
574 }
575 \f
576 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
577 Only handle cases where the truncated value is inherently an rvalue.
578
579 RTL provides two ways of truncating a value:
580
581 1. a lowpart subreg. This form is only a truncation when both
582 the outer and inner modes (here MODE and OP_MODE respectively)
583 are scalar integers, and only then when the subreg is used as
584 an rvalue.
585
586 It is only valid to form such truncating subregs if the
587 truncation requires no action by the target. The onus for
588 proving this is on the creator of the subreg -- e.g. the
589 caller to simplify_subreg or simplify_gen_subreg -- and typically
590 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
591
592 2. a TRUNCATE. This form handles both scalar and compound integers.
593
594 The first form is preferred where valid. However, the TRUNCATE
595 handling in simplify_unary_operation turns the second form into the
596 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
597 so it is generally safe to form rvalue truncations using:
598
599 simplify_gen_unary (TRUNCATE, ...)
600
601 and leave simplify_unary_operation to work out which representation
602 should be used.
603
604 Because of the proof requirements on (1), simplify_truncation must
605 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
606 regardless of whether the outer truncation came from a SUBREG or a
607 TRUNCATE. For example, if the caller has proven that an SImode
608 truncation of:
609
610 (and:DI X Y)
611
612 is a no-op and can be represented as a subreg, it does not follow
613 that SImode truncations of X and Y are also no-ops. On a target
614 like 64-bit MIPS that requires SImode values to be stored in
615 sign-extended form, an SImode truncation of:
616
617 (and:DI (reg:DI X) (const_int 63))
618
619 is trivially a no-op because only the lower 6 bits can be set.
620 However, X is still an arbitrary 64-bit number and so we cannot
621 assume that truncating it too is a no-op. */
622
623 static rtx
624 simplify_truncation (machine_mode mode, rtx op,
625 machine_mode op_mode)
626 {
627 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
628 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
629 scalar_int_mode int_mode, int_op_mode, subreg_mode;
630
631 gcc_assert (precision <= op_precision);
632
633 /* Optimize truncations of zero and sign extended values. */
634 if (GET_CODE (op) == ZERO_EXTEND
635 || GET_CODE (op) == SIGN_EXTEND)
636 {
637 /* There are three possibilities. If MODE is the same as the
638 origmode, we can omit both the extension and the subreg.
639 If MODE is not larger than the origmode, we can apply the
640 truncation without the extension. Finally, if the outermode
641 is larger than the origmode, we can just extend to the appropriate
642 mode. */
643 machine_mode origmode = GET_MODE (XEXP (op, 0));
644 if (mode == origmode)
645 return XEXP (op, 0);
646 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
647 return simplify_gen_unary (TRUNCATE, mode,
648 XEXP (op, 0), origmode);
649 else
650 return simplify_gen_unary (GET_CODE (op), mode,
651 XEXP (op, 0), origmode);
652 }
653
654 /* If the machine can perform operations in the truncated mode, distribute
655 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
656 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
657 if ((!WORD_REGISTER_OPERATIONS
658 || precision >= BITS_PER_WORD)
659 && (GET_CODE (op) == PLUS
660 || GET_CODE (op) == MINUS
661 || GET_CODE (op) == MULT))
662 {
663 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
664 if (op0)
665 {
666 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
667 if (op1)
668 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
669 }
670 }
671
672 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
673 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
674 the outer subreg is effectively a truncation to the original mode. */
675 if ((GET_CODE (op) == LSHIFTRT
676 || GET_CODE (op) == ASHIFTRT)
677 /* Ensure that OP_MODE is at least twice as wide as MODE
678 to avoid the possibility that an outer LSHIFTRT shifts by more
679 than the sign extension's sign_bit_copies and introduces zeros
680 into the high bits of the result. */
681 && 2 * precision <= op_precision
682 && CONST_INT_P (XEXP (op, 1))
683 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
684 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
685 && UINTVAL (XEXP (op, 1)) < precision)
686 return simplify_gen_binary (ASHIFTRT, mode,
687 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
688
689 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
690 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
691 the outer subreg is effectively a truncation to the original mode. */
692 if ((GET_CODE (op) == LSHIFTRT
693 || GET_CODE (op) == ASHIFTRT)
694 && CONST_INT_P (XEXP (op, 1))
695 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
697 && UINTVAL (XEXP (op, 1)) < precision)
698 return simplify_gen_binary (LSHIFTRT, mode,
699 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
700
701 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
702 (ashift:QI (x:QI) C), where C is a suitable small constant and
703 the outer subreg is effectively a truncation to the original mode. */
704 if (GET_CODE (op) == ASHIFT
705 && CONST_INT_P (XEXP (op, 1))
706 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
707 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
708 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
709 && UINTVAL (XEXP (op, 1)) < precision)
710 return simplify_gen_binary (ASHIFT, mode,
711 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
712
713 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
714 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
715 and C2. */
716 if (GET_CODE (op) == AND
717 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
718 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
719 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
720 && CONST_INT_P (XEXP (op, 1)))
721 {
722 rtx op0 = (XEXP (XEXP (op, 0), 0));
723 rtx shift_op = XEXP (XEXP (op, 0), 1);
724 rtx mask_op = XEXP (op, 1);
725 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
726 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
727
728 if (shift < precision
729 /* If doing this transform works for an X with all bits set,
730 it works for any X. */
731 && ((GET_MODE_MASK (mode) >> shift) & mask)
732 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
733 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
734 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
735 {
736 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
737 return simplify_gen_binary (AND, mode, op0, mask_op);
738 }
739 }
740
741 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
742 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
743 changing len. */
744 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
745 && REG_P (XEXP (op, 0))
746 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
747 && CONST_INT_P (XEXP (op, 1))
748 && CONST_INT_P (XEXP (op, 2)))
749 {
750 rtx op0 = XEXP (op, 0);
751 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
752 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
753 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
754 {
755 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
756 if (op0)
757 {
758 pos -= op_precision - precision;
759 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
760 XEXP (op, 1), GEN_INT (pos));
761 }
762 }
763 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
764 {
765 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
766 if (op0)
767 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
768 XEXP (op, 1), XEXP (op, 2));
769 }
770 }
771
772 /* Recognize a word extraction from a multi-word subreg. */
773 if ((GET_CODE (op) == LSHIFTRT
774 || GET_CODE (op) == ASHIFTRT)
775 && SCALAR_INT_MODE_P (mode)
776 && SCALAR_INT_MODE_P (op_mode)
777 && precision >= BITS_PER_WORD
778 && 2 * precision <= op_precision
779 && CONST_INT_P (XEXP (op, 1))
780 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
781 && UINTVAL (XEXP (op, 1)) < op_precision)
782 {
783 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
784 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
785 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
786 (WORDS_BIG_ENDIAN
787 ? byte - shifted_bytes
788 : byte + shifted_bytes));
789 }
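/* E.g. on a 32-bit little-endian target this turns
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) into
   (subreg:SI (reg:DI X) 4), i.e. the high word of X.  */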
790
791 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
792 and try replacing the TRUNCATE and shift with it. Don't do this
793 if the MEM has a mode-dependent address. */
794 if ((GET_CODE (op) == LSHIFTRT
795 || GET_CODE (op) == ASHIFTRT)
796 && is_a <scalar_int_mode> (mode, &int_mode)
797 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
798 && MEM_P (XEXP (op, 0))
799 && CONST_INT_P (XEXP (op, 1))
800 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
801 && INTVAL (XEXP (op, 1)) > 0
802 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
803 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
804 MEM_ADDR_SPACE (XEXP (op, 0)))
805 && ! MEM_VOLATILE_P (XEXP (op, 0))
806 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
807 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
808 {
809 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
810 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
811 return adjust_address_nv (XEXP (op, 0), int_mode,
812 (WORDS_BIG_ENDIAN
813 ? byte - shifted_bytes
814 : byte + shifted_bytes));
815 }
816
817 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
818 (OP:SI foo:SI) if OP is NEG or ABS. */
819 if ((GET_CODE (op) == ABS
820 || GET_CODE (op) == NEG)
821 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
822 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
823 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
824 return simplify_gen_unary (GET_CODE (op), mode,
825 XEXP (XEXP (op, 0), 0), mode);
826
827 /* (truncate:A (subreg:B (truncate:C X) 0)) is
828 (truncate:A X). */
829 if (GET_CODE (op) == SUBREG
830 && is_a <scalar_int_mode> (mode, &int_mode)
831 && SCALAR_INT_MODE_P (op_mode)
832 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
833 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
834 && subreg_lowpart_p (op))
835 {
836 rtx inner = XEXP (SUBREG_REG (op), 0);
837 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
838 return simplify_gen_unary (TRUNCATE, int_mode, inner,
839 GET_MODE (inner));
840 else
841 /* If subreg above is paradoxical and C is narrower
842 than A, return (subreg:A (truncate:C X) 0). */
843 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
844 }
845
846 /* (truncate:A (truncate:B X)) is (truncate:A X). */
847 if (GET_CODE (op) == TRUNCATE)
848 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
849 GET_MODE (XEXP (op, 0)));
850
851 /* (truncate:A (ior X C)) is (const_int -1) if C already truncates to -1
852 in mode A. */
853 if (GET_CODE (op) == IOR
854 && SCALAR_INT_MODE_P (mode)
855 && SCALAR_INT_MODE_P (op_mode)
856 && CONST_INT_P (XEXP (op, 1))
857 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
858 return constm1_rtx;
859
860 return NULL_RTX;
861 }
862 \f
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
866 rtx
867 simplify_unary_operation (enum rtx_code code, machine_mode mode,
868 rtx op, machine_mode op_mode)
869 {
870 rtx trueop, tem;
871
872 trueop = avoid_constant_pool_reference (op);
873
874 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
875 if (tem)
876 return tem;
877
878 return simplify_unary_operation_1 (code, mode, op);
879 }
880
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
882 to be exact. */
883
884 static bool
885 exact_int_to_float_conversion_p (const_rtx op)
886 {
887 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
888 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode != VOIDmode);
891 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
892 int in_bits = in_prec;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode))
894 {
895 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
896 if (GET_CODE (op) == FLOAT)
897 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
898 else if (GET_CODE (op) == UNSIGNED_FLOAT)
899 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
900 else
901 gcc_unreachable ();
902 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
903 }
904 return in_bits <= out_bits;
905 }
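/* For example, (float:DF (reg:SI X)) is always exact, since DFmode's
   53-bit significand can hold any SImode value, whereas
   (float:SF (reg:SI X)) is exact only when nonzero_bits and
   num_sign_bit_copies show that X fits in SFmode's 24 significand
   bits.  */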
906
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
911 {
912 enum rtx_code reversed;
913 rtx temp, elt, base, step;
914 scalar_int_mode inner, int_mode, op_mode, op0_mode;
915
916 switch (code)
917 {
918 case NOT:
919 /* (not (not X)) == X. */
920 if (GET_CODE (op) == NOT)
921 return XEXP (op, 0);
922
923 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
924 comparison is all ones. */
925 if (COMPARISON_P (op)
926 && (mode == BImode || STORE_FLAG_VALUE == -1)
927 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
928 return simplify_gen_relational (reversed, mode, VOIDmode,
929 XEXP (op, 0), XEXP (op, 1));
930
931 /* (not (plus X -1)) can become (neg X). */
932 if (GET_CODE (op) == PLUS
933 && XEXP (op, 1) == constm1_rtx)
934 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935
936 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
937 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
938 and MODE_VECTOR_INT. */
939 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
940 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
941 CONSTM1_RTX (mode));
942
943 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
944 if (GET_CODE (op) == XOR
945 && CONST_INT_P (XEXP (op, 1))
946 && (temp = simplify_unary_operation (NOT, mode,
947 XEXP (op, 1), mode)) != 0)
948 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
949
950 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
951 if (GET_CODE (op) == PLUS
952 && CONST_INT_P (XEXP (op, 1))
953 && mode_signbit_p (mode, XEXP (op, 1))
954 && (temp = simplify_unary_operation (NOT, mode,
955 XEXP (op, 1), mode)) != 0)
956 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
957
958
959 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
960 operands other than 1, but that is not valid. We could do a
961 similar simplification for (not (lshiftrt C X)) where C is
962 just the sign bit, but this doesn't seem common enough to
963 bother with. */
964 if (GET_CODE (op) == ASHIFT
965 && XEXP (op, 0) == const1_rtx)
966 {
967 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
968 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
969 }
970
971 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
972 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
973 so we can perform the above simplification. */
974 if (STORE_FLAG_VALUE == -1
975 && is_a <scalar_int_mode> (mode, &int_mode)
976 && GET_CODE (op) == ASHIFTRT
977 && CONST_INT_P (XEXP (op, 1))
978 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
979 return simplify_gen_relational (GE, int_mode, VOIDmode,
980 XEXP (op, 0), const0_rtx);
981
982
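/* Likewise (not (subreg:M (ashift:N 1 X) 0)) is the lowpart of
   (rotate:N ~1 X), mirroring the (not (ashift 1 X)) case above. */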
983 if (partial_subreg_p (op)
984 && subreg_lowpart_p (op)
985 && GET_CODE (SUBREG_REG (op)) == ASHIFT
986 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
987 {
988 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
989 rtx x;
990
991 x = gen_rtx_ROTATE (inner_mode,
992 simplify_gen_unary (NOT, inner_mode, const1_rtx,
993 inner_mode),
994 XEXP (SUBREG_REG (op), 1));
995 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
996 if (temp)
997 return temp;
998 }
999
1000 /* Apply De Morgan's laws to reduce number of patterns for machines
1001 with negating logical insns (and-not, nand, etc.). If result has
1002 only one NOT, put it first, since that is how the patterns are
1003 coded. */
1004 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1005 {
1006 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1007 machine_mode op_mode;
1008
1009 op_mode = GET_MODE (in1);
1010 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1011
1012 op_mode = GET_MODE (in2);
1013 if (op_mode == VOIDmode)
1014 op_mode = mode;
1015 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1016
1017 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1018 std::swap (in1, in2);
1019
1020 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1021 mode, in1, in2);
1022 }
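/* E.g. (not (and:SI X Y)) becomes (ior:SI (not:SI X) (not:SI Y));
   the NOT-first ordering matches how such patterns are written in
   machine descriptions. */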
1023
1024 /* (not (bswap x)) -> (bswap (not x)). */
1025 if (GET_CODE (op) == BSWAP)
1026 {
1027 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1028 return simplify_gen_unary (BSWAP, mode, x, mode);
1029 }
1030 break;
1031
1032 case NEG:
1033 /* (neg (neg X)) == X. */
1034 if (GET_CODE (op) == NEG)
1035 return XEXP (op, 0);
1036
1037 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1038 If the comparison is not reversible, use
1039 x ? y : (neg y). */
1040 if (GET_CODE (op) == IF_THEN_ELSE)
1041 {
1042 rtx cond = XEXP (op, 0);
1043 rtx true_rtx = XEXP (op, 1);
1044 rtx false_rtx = XEXP (op, 2);
1045
1046 if ((GET_CODE (true_rtx) == NEG
1047 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1048 || (GET_CODE (false_rtx) == NEG
1049 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1050 {
1051 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1052 temp = reversed_comparison (cond, mode);
1053 else
1054 {
1055 temp = cond;
1056 std::swap (true_rtx, false_rtx);
1057 }
1058 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1059 mode, temp, true_rtx, false_rtx);
1060 }
1061 }
1062
1063 /* (neg (plus X 1)) can become (not X). */
1064 if (GET_CODE (op) == PLUS
1065 && XEXP (op, 1) == const1_rtx)
1066 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1067
1068 /* Similarly, (neg (not X)) is (plus X 1). */
1069 if (GET_CODE (op) == NOT)
1070 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1071 CONST1_RTX (mode));
1072
1073 /* (neg (minus X Y)) can become (minus Y X). This transformation
1074 isn't safe for modes with signed zeros, since if X and Y are
1075 both +0, (minus Y X) is the same as (minus X Y). If the
1076 rounding mode is towards +infinity (or -infinity) then the two
1077 expressions will be rounded differently. */
1078 if (GET_CODE (op) == MINUS
1079 && !HONOR_SIGNED_ZEROS (mode)
1080 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1081 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1082
1083 if (GET_CODE (op) == PLUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 {
1087 /* (neg (plus A C)) is simplified to (minus -C A). */
1088 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1089 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1090 {
1091 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1092 if (temp)
1093 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1094 }
1095
1096 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1097 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1099 }
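/* E.g. (neg (plus:SI X (const_int 5))) becomes
   (minus:SI (const_int -5) X), and with a non-constant B,
   (neg (plus:SI A B)) becomes (minus:SI (neg:SI A) B). */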
1100
1101 /* (neg (mult A B)) becomes (mult A (neg B)).
1102 This works even for floating-point values. */
1103 if (GET_CODE (op) == MULT
1104 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1105 {
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1107 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1108 }
1109
1110 /* NEG commutes with ASHIFT since it is multiplication. Only do
1111 this if we can then eliminate the NEG (e.g., if the operand
1112 is a constant). */
1113 if (GET_CODE (op) == ASHIFT)
1114 {
1115 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1116 if (temp)
1117 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1118 }
1119
1120 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1121 C is equal to the width of MODE minus 1. */
1122 if (GET_CODE (op) == ASHIFTRT
1123 && CONST_INT_P (XEXP (op, 1))
1124 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1125 return simplify_gen_binary (LSHIFTRT, mode,
1126 XEXP (op, 0), XEXP (op, 1));
1127
1128 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1129 C is equal to the width of MODE minus 1. */
1130 if (GET_CODE (op) == LSHIFTRT
1131 && CONST_INT_P (XEXP (op, 1))
1132 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1133 return simplify_gen_binary (ASHIFTRT, mode,
1134 XEXP (op, 0), XEXP (op, 1));
1135
1136 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1137 if (GET_CODE (op) == XOR
1138 && XEXP (op, 1) == const1_rtx
1139 && nonzero_bits (XEXP (op, 0), mode) == 1)
1140 return plus_constant (mode, XEXP (op, 0), -1);
1141
1142 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1143 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1144 if (GET_CODE (op) == LT
1145 && XEXP (op, 1) == const0_rtx
1146 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1147 {
1148 int_mode = as_a <scalar_int_mode> (mode);
1149 int isize = GET_MODE_PRECISION (inner);
1150 if (STORE_FLAG_VALUE == 1)
1151 {
1152 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1153 gen_int_shift_amount (inner,
1154 isize - 1));
1155 if (int_mode == inner)
1156 return temp;
1157 if (GET_MODE_PRECISION (int_mode) > isize)
1158 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1159 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1160 }
1161 else if (STORE_FLAG_VALUE == -1)
1162 {
1163 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1164 gen_int_shift_amount (inner,
1165 isize - 1));
1166 if (int_mode == inner)
1167 return temp;
1168 if (GET_MODE_PRECISION (int_mode) > isize)
1169 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1170 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1171 }
1172 }
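/* E.g. for SImode with STORE_FLAG_VALUE == 1, this turns
   (neg (lt:SI X (const_int 0))) into (ashiftrt:SI X (const_int 31)),
   which replicates the sign bit of X across the whole word. */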
1173
1174 if (vec_series_p (op, &base, &step))
1175 {
1176 /* Only create a new series if we can simplify both parts. In other
1177 cases this isn't really a simplification, and it's not necessarily
1178 a win to replace a vector operation with a scalar operation. */
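/* E.g. (neg (vec_series:V4SI (const_int 1) (const_int 2))), the
   series 1, 3, 5, 7, can become (vec_series:V4SI (const_int -1)
   (const_int -2)), but a series with a non-constant base or step
   is left alone. */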
1179 scalar_mode inner_mode = GET_MODE_INNER (mode);
1180 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1181 if (base)
1182 {
1183 step = simplify_unary_operation (NEG, inner_mode,
1184 step, inner_mode);
1185 if (step)
1186 return gen_vec_series (mode, base, step);
1187 }
1188 }
1189 break;
1190
1191 case TRUNCATE:
1192 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1193 with the umulXi3_highpart patterns. */
1194 if (GET_CODE (op) == LSHIFTRT
1195 && GET_CODE (XEXP (op, 0)) == MULT)
1196 break;
1197
1198 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1199 {
1200 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1201 {
1202 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1203 if (temp)
1204 return temp;
1205 }
1206 /* We can't handle truncation to a partial integer mode here
1207 because we don't know the real bitsize of the partial
1208 integer mode. */
1209 break;
1210 }
1211
1212 if (GET_MODE (op) != VOIDmode)
1213 {
1214 temp = simplify_truncation (mode, op, GET_MODE (op));
1215 if (temp)
1216 return temp;
1217 }
1218
1219 /* If we know that the value is already truncated, we can
1220 replace the TRUNCATE with a SUBREG. */
1221 if (known_eq (GET_MODE_NUNITS (mode), 1)
1222 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1223 || truncated_to_mode (mode, op)))
1224 {
1225 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1226 if (temp)
1227 return temp;
1228 }
1229
1230 /* A truncate of a comparison can be replaced with a subreg if
1231 STORE_FLAG_VALUE permits. This is like the previous test,
1232 but it works even if the comparison is done in a mode larger
1233 than HOST_BITS_PER_WIDE_INT. */
1234 if (HWI_COMPUTABLE_MODE_P (mode)
1235 && COMPARISON_P (op)
1236 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1237 {
1238 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1239 if (temp)
1240 return temp;
1241 }
1242
1243 /* A truncate of a memory is just loading the low part of the memory
1244 if we are not changing the meaning of the address. */
1245 if (GET_CODE (op) == MEM
1246 && !VECTOR_MODE_P (mode)
1247 && !MEM_VOLATILE_P (op)
1248 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1249 {
1250 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1251 if (temp)
1252 return temp;
1253 }
1254
1255 break;
1256
1257 case FLOAT_TRUNCATE:
1258 if (DECIMAL_FLOAT_MODE_P (mode))
1259 break;
1260
1261 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1262 if (GET_CODE (op) == FLOAT_EXTEND
1263 && GET_MODE (XEXP (op, 0)) == mode)
1264 return XEXP (op, 0);
1265
1266 /* (float_truncate:SF (float_truncate:DF foo:XF))
1267 = (float_truncate:SF foo:XF).
1268 This may eliminate double rounding, so it is unsafe.
1269
1270 (float_truncate:SF (float_extend:XF foo:DF))
1271 = (float_truncate:SF foo:DF).
1272
1273 (float_truncate:DF (float_extend:XF foo:SF))
1274 = (float_extend:DF foo:SF). */
1275 if ((GET_CODE (op) == FLOAT_TRUNCATE
1276 && flag_unsafe_math_optimizations)
1277 || GET_CODE (op) == FLOAT_EXTEND)
1278 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1279 > GET_MODE_UNIT_SIZE (mode)
1280 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1281 mode,
1282 XEXP (op, 0), mode);
1283
1284 /* (float_truncate (float x)) is (float x) */
1285 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1286 && (flag_unsafe_math_optimizations
1287 || exact_int_to_float_conversion_p (op)))
1288 return simplify_gen_unary (GET_CODE (op), mode,
1289 XEXP (op, 0),
1290 GET_MODE (XEXP (op, 0)));
1291
1292 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1293 (OP:SF foo:SF) if OP is NEG or ABS. */
1294 if ((GET_CODE (op) == ABS
1295 || GET_CODE (op) == NEG)
1296 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1297 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1298 return simplify_gen_unary (GET_CODE (op), mode,
1299 XEXP (XEXP (op, 0), 0), mode);
1300
1301 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1302 is (float_truncate:SF x). */
1303 if (GET_CODE (op) == SUBREG
1304 && subreg_lowpart_p (op)
1305 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1306 return SUBREG_REG (op);
1307 break;
1308
1309 case FLOAT_EXTEND:
1310 if (DECIMAL_FLOAT_MODE_P (mode))
1311 break;
1312
1313 /* (float_extend (float_extend x)) is (float_extend x)
1314
1315 (float_extend (float x)) is (float x) assuming that double
1316 rounding can't happen.
1317 */
1318 if (GET_CODE (op) == FLOAT_EXTEND
1319 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1320 && exact_int_to_float_conversion_p (op)))
1321 return simplify_gen_unary (GET_CODE (op), mode,
1322 XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1324
1325 break;
1326
1327 case ABS:
1328 /* (abs (neg <foo>)) -> (abs <foo>) */
1329 if (GET_CODE (op) == NEG)
1330 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1331 GET_MODE (XEXP (op, 0)));
1332
1333 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1334 do nothing. */
1335 if (GET_MODE (op) == VOIDmode)
1336 break;
1337
1338 /* If operand is something known to be positive, ignore the ABS. */
1339 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1340 || val_signbit_known_clear_p (GET_MODE (op),
1341 nonzero_bits (op, GET_MODE (op))))
1342 return op;
1343
1344 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1345 if (is_a <scalar_int_mode> (mode, &int_mode)
1346 && (num_sign_bit_copies (op, int_mode)
1347 == GET_MODE_PRECISION (int_mode)))
1348 return gen_rtx_NEG (int_mode, op);
1349
1350 break;
1351
1352 case FFS:
1353 /* (ffs (*_extend <X>)) = (ffs <X>) */
1354 if (GET_CODE (op) == SIGN_EXTEND
1355 || GET_CODE (op) == ZERO_EXTEND)
1356 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1357 GET_MODE (XEXP (op, 0)));
1358 break;
1359
1360 case POPCOUNT:
1361 switch (GET_CODE (op))
1362 {
1363 case BSWAP:
1364 case ZERO_EXTEND:
1365 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1366 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1367 GET_MODE (XEXP (op, 0)));
1368
1369 case ROTATE:
1370 case ROTATERT:
1371 /* Rotations don't affect popcount. */
1372 if (!side_effects_p (XEXP (op, 1)))
1373 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1374 GET_MODE (XEXP (op, 0)));
1375 break;
1376
1377 default:
1378 break;
1379 }
1380 break;
1381
1382 case PARITY:
1383 switch (GET_CODE (op))
1384 {
1385 case NOT:
1386 case BSWAP:
1387 case ZERO_EXTEND:
1388 case SIGN_EXTEND:
1389 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1390 GET_MODE (XEXP (op, 0)));
1391
1392 case ROTATE:
1393 case ROTATERT:
1394 /* Rotations don't affect parity. */
1395 if (!side_effects_p (XEXP (op, 1)))
1396 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1397 GET_MODE (XEXP (op, 0)));
1398 break;
1399
1400 default:
1401 break;
1402 }
1403 break;
1404
1405 case BSWAP:
1406 /* (bswap (bswap x)) -> x. */
1407 if (GET_CODE (op) == BSWAP)
1408 return XEXP (op, 0);
1409 break;
1410
1411 case FLOAT:
1412 /* (float (sign_extend <X>)) = (float <X>). */
1413 if (GET_CODE (op) == SIGN_EXTEND)
1414 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1415 GET_MODE (XEXP (op, 0)));
1416 break;
1417
1418 case SIGN_EXTEND:
1419 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1420 becomes just the MINUS if its mode is MODE. This allows
1421 folding switch statements on machines using casesi (such as
1422 the VAX). */
1423 if (GET_CODE (op) == TRUNCATE
1424 && GET_MODE (XEXP (op, 0)) == mode
1425 && GET_CODE (XEXP (op, 0)) == MINUS
1426 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1427 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1428 return XEXP (op, 0);
1429
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1433 {
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1438
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == SIGN_EXTEND
1442 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == SIGN_EXTEND
1444 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1445 {
1446 machine_mode lmode = GET_MODE (lhs);
1447 machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1449
1450 if (lcode == ASHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = (GET_MODE_UNIT_PRECISION (lmode)
1453 - INTVAL (XEXP (lhs, 1)));
1454 else /* lcode == SIGN_EXTEND */
1455 /* Size of inner mode. */
1456 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457
1458 if (rcode == ASHIFTRT)
1459 bits += (GET_MODE_UNIT_PRECISION (rmode)
1460 - INTVAL (XEXP (rhs, 1)));
1461 else /* rcode == SIGN_EXTEND */
1462 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1463
1464 /* We can only widen multiplies if the result is mathematically
1465 equivalent. I.e. if overflow was impossible. */
1466 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1467 return simplify_gen_binary
1468 (MULT, mode,
1469 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1470 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1471 }
1472 }
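/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI X:HI)
   (sign_extend:SI Y:HI))) needs at most 16 + 16 = 32 bits, so it is
   rewritten as (mult:DI (sign_extend:DI X:HI) (sign_extend:DI Y:HI)). */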
1473
1474 /* Check for a sign extension of a subreg of a promoted
1475 variable, where the promotion is sign-extended, and the
1476 target mode is the same as the variable's promotion. */
1477 if (GET_CODE (op) == SUBREG
1478 && SUBREG_PROMOTED_VAR_P (op)
1479 && SUBREG_PROMOTED_SIGNED_P (op)
1480 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1481 {
1482 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1483 if (temp)
1484 return temp;
1485 }
1486
1487 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1488 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1489 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1490 {
1491 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1492 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1493 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1494 GET_MODE (XEXP (op, 0)));
1495 }
1496
1497 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1499 GET_MODE_BITSIZE (N) - I bits.
1500 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1501 is similarly (zero_extend:M (subreg:O <X>)). */
1502 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1503 && GET_CODE (XEXP (op, 0)) == ASHIFT
1504 && is_a <scalar_int_mode> (mode, &int_mode)
1505 && CONST_INT_P (XEXP (op, 1))
1506 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1507 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1508 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1509 {
1510 scalar_int_mode tmode;
1511 gcc_assert (GET_MODE_PRECISION (int_mode)
1512 > GET_MODE_PRECISION (op_mode));
1513 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1514 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1515 {
1516 rtx inner
1517 = rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1518 if (inner)
1519 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1520 ? SIGN_EXTEND : ZERO_EXTEND,
1521 int_mode, inner, tmode);
1522 }
1523 }
1524
1525 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1526 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1527 if (GET_CODE (op) == LSHIFTRT
1528 && CONST_INT_P (XEXP (op, 1))
1529 && XEXP (op, 1) != const0_rtx)
1530 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1531
1532 #if defined(POINTERS_EXTEND_UNSIGNED)
1533 /* As we do not know which address space the pointer is referring to,
1534 we can do this only if the target does not support different pointer
1535 or address modes depending on the address space. */
1536 if (target_default_pointer_address_modes_p ()
1537 && ! POINTERS_EXTEND_UNSIGNED
1538 && mode == Pmode && GET_MODE (op) == ptr_mode
1539 && (CONSTANT_P (op)
1540 || (GET_CODE (op) == SUBREG
1541 && REG_P (SUBREG_REG (op))
1542 && REG_POINTER (SUBREG_REG (op))
1543 && GET_MODE (SUBREG_REG (op)) == Pmode))
1544 && !targetm.have_ptr_extend ())
1545 {
1546 temp
1547 = convert_memory_address_addr_space_1 (Pmode, op,
1548 ADDR_SPACE_GENERIC, false,
1549 true);
1550 if (temp)
1551 return temp;
1552 }
1553 #endif
1554 break;
1555
1556 case ZERO_EXTEND:
1557 /* Check for a zero extension of a subreg of a promoted
1558 variable, where the promotion is zero-extended, and the
1559 target mode is the same as the variable's promotion. */
1560 if (GET_CODE (op) == SUBREG
1561 && SUBREG_PROMOTED_VAR_P (op)
1562 && SUBREG_PROMOTED_UNSIGNED_P (op)
1563 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1564 {
1565 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1566 if (temp)
1567 return temp;
1568 }
1569
1570 /* Extending a widening multiplication should be canonicalized to
1571 a wider widening multiplication. */
1572 if (GET_CODE (op) == MULT)
1573 {
1574 rtx lhs = XEXP (op, 0);
1575 rtx rhs = XEXP (op, 1);
1576 enum rtx_code lcode = GET_CODE (lhs);
1577 enum rtx_code rcode = GET_CODE (rhs);
1578
1579 /* Widening multiplies usually extend both operands, but sometimes
1580 they use a shift to extract a portion of a register. */
1581 if ((lcode == ZERO_EXTEND
1582 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1583 && (rcode == ZERO_EXTEND
1584 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1585 {
1586 machine_mode lmode = GET_MODE (lhs);
1587 machine_mode rmode = GET_MODE (rhs);
1588 int bits;
1589
1590 if (lcode == LSHIFTRT)
1591 /* Number of bits not shifted off the end. */
1592 bits = (GET_MODE_UNIT_PRECISION (lmode)
1593 - INTVAL (XEXP (lhs, 1)));
1594 else /* lcode == ZERO_EXTEND */
1595 /* Size of inner mode. */
1596 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1597
1598 if (rcode == LSHIFTRT)
1599 bits += (GET_MODE_UNIT_PRECISION (rmode)
1600 - INTVAL (XEXP (rhs, 1)));
1601 else /* rcode == ZERO_EXTEND */
1602 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1603
1604 /* We can only widen multiplies if the result is mathematically
1605 equivalent. I.e. if overflow was impossible. */
1606 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1607 return simplify_gen_binary
1608 (MULT, mode,
1609 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1610 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1611 }
1612 }
1613
1614 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1615 if (GET_CODE (op) == ZERO_EXTEND)
1616 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1617 GET_MODE (XEXP (op, 0)));
1618
1619 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1620 is (zero_extend:M (subreg:O <X>)) if there is mode with
1621 GET_MODE_PRECISION (N) - I bits. */
1622 if (GET_CODE (op) == LSHIFTRT
1623 && GET_CODE (XEXP (op, 0)) == ASHIFT
1624 && is_a <scalar_int_mode> (mode, &int_mode)
1625 && CONST_INT_P (XEXP (op, 1))
1626 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1627 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1628 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1629 {
1630 scalar_int_mode tmode;
1631 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1632 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1633 {
1634 rtx inner
1635 = rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1636 if (inner)
1637 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1638 inner, tmode);
1639 }
1640 }
1641
1642 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1643 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1644 of mode N. E.g.
1645 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1646 (and:SI (reg:SI) (const_int 63)). */
1647 if (partial_subreg_p (op)
1648 && is_a <scalar_int_mode> (mode, &int_mode)
1649 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1650 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1651 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1652 && subreg_lowpart_p (op)
1653 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1654 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1655 {
1656 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1657 return SUBREG_REG (op);
1658 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1659 op0_mode);
1660 }
1661
1662 #if defined(POINTERS_EXTEND_UNSIGNED)
1663 /* As we do not know which address space the pointer is referring to,
1664 we can do this only if the target does not support different pointer
1665 or address modes depending on the address space. */
1666 if (target_default_pointer_address_modes_p ()
1667 && POINTERS_EXTEND_UNSIGNED > 0
1668 && mode == Pmode && GET_MODE (op) == ptr_mode
1669 && (CONSTANT_P (op)
1670 || (GET_CODE (op) == SUBREG
1671 && REG_P (SUBREG_REG (op))
1672 && REG_POINTER (SUBREG_REG (op))
1673 && GET_MODE (SUBREG_REG (op)) == Pmode))
1674 && !targetm.have_ptr_extend ())
1675 {
1676 temp
1677 = convert_memory_address_addr_space_1 (Pmode, op,
1678 ADDR_SPACE_GENERIC, false,
1679 true);
1680 if (temp)
1681 return temp;
1682 }
1683 #endif
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 if (VECTOR_MODE_P (mode)
1691 && vec_duplicate_p (op, &elt)
1692 && code != VEC_DUPLICATE)
1693 {
1694 /* Try applying the operator to ELT and see if that simplifies.
1695 We can duplicate the result if so.
1696
1697 The reason we don't use simplify_gen_unary is that it isn't
1698 necessarily a win to convert things like:
1699
1700 (neg:V (vec_duplicate:V (reg:S R)))
1701
1702 to:
1703
1704 (vec_duplicate:V (neg:S (reg:S R)))
1705
1706 The first might be done entirely in vector registers while the
1707 second might need a move between register files. */
1708 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1709 elt, GET_MODE_INNER (GET_MODE (op)));
1710 if (temp)
1711 return gen_vec_duplicate (mode, temp);
1712 }
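/* As a concrete case, (neg:V4SI (vec_duplicate:V4SI (const_int 1)))
   folds here to a duplicated (const_int -1), i.e. the constant
   vector [-1 -1 -1 -1], because NEG simplifies on the scalar
   element. */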
1713
1714 return 0;
1715 }
1716
1717 /* Try to compute the value of a unary operation CODE whose output mode is to
1718 be MODE with input operand OP whose mode was originally OP_MODE.
1719 Return zero if the value cannot be computed. */
1720 rtx
1721 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1722 rtx op, machine_mode op_mode)
1723 {
1724 scalar_int_mode result_mode;
1725
1726 if (code == VEC_DUPLICATE)
1727 {
1728 gcc_assert (VECTOR_MODE_P (mode));
1729 if (GET_MODE (op) != VOIDmode)
1730 {
1731 if (!VECTOR_MODE_P (GET_MODE (op)))
1732 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1733 else
1734 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1735 (GET_MODE (op)));
1736 }
1737 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1738 return gen_const_vec_duplicate (mode, op);
1739 if (GET_CODE (op) == CONST_VECTOR
1740 && (CONST_VECTOR_DUPLICATE_P (op)
1741 || CONST_VECTOR_NUNITS (op).is_constant ()))
1742 {
1743 unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1744 ? CONST_VECTOR_NPATTERNS (op)
1745 : CONST_VECTOR_NUNITS (op).to_constant ());
1746 gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1747 rtx_vector_builder builder (mode, npatterns, 1);
1748 for (unsigned i = 0; i < npatterns; i++)
1749 builder.quick_push (CONST_VECTOR_ELT (op, i));
1750 return builder.build ();
1751 }
1752 }
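/* For example, (vec_duplicate:V4SI (const_int 1)) becomes the
   constant vector [1 1 1 1], and duplicating the V2SI constant
   [1 2] into V4SI should give the repeated pattern [1 2 1 2],
   one element per pattern as built above. */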
1753
1754 if (VECTOR_MODE_P (mode)
1755 && GET_CODE (op) == CONST_VECTOR
1756 && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1757 {
1758 gcc_assert (GET_MODE (op) == op_mode);
1759
1760 rtx_vector_builder builder;
1761 if (!builder.new_unary_operation (mode, op, false))
1762 return 0;
1763
1764 unsigned int count = builder.encoded_nelts ();
1765 for (unsigned int i = 0; i < count; i++)
1766 {
1767 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1768 CONST_VECTOR_ELT (op, i),
1769 GET_MODE_INNER (op_mode));
1770 if (!x || !valid_for_const_vector_p (mode, x))
1771 return 0;
1772 builder.quick_push (x);
1773 }
1774 return builder.build ();
1775 }
1776
1777 /* The order of these tests is critical so that, for example, we don't
1778 check the wrong mode (input vs. output) for a conversion operation,
1779 such as FIX. At some point, this should be simplified. */
1780
1781 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1782 {
1783 REAL_VALUE_TYPE d;
1784
1785 if (op_mode == VOIDmode)
1786 {
1787 /* CONST_INTs have VOIDmode as their mode. We assume that all
1788 the bits of the constant are significant, though this is a
1789 dangerous assumption, as CONST_INTs are often created and
1790 used with garbage in the bits outside of the precision of
1791 the implied mode of the const_int. */
1792 op_mode = MAX_MODE_INT;
1793 }
1794
1795 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1796
1797 /* Avoid the folding if flag_signaling_nans is on and
1798 operand is a signaling NaN. */
1799 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1800 return 0;
1801
1802 d = real_value_truncate (mode, d);
1803 return const_double_from_real_value (d, mode);
1804 }
1805 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1806 {
1807 REAL_VALUE_TYPE d;
1808
1809 if (op_mode == VOIDmode)
1810 {
1811 /* CONST_INTs have VOIDmode as their mode. We assume that all
1812 the bits of the constant are significant, though this is a
1813 dangerous assumption, as CONST_INTs are often created and
1814 used with garbage in the bits outside of the precision of
1815 the implied mode of the const_int. */
1816 op_mode = MAX_MODE_INT;
1817 }
1818
1819 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1820
1821 /* Avoid the folding if flag_signaling_nans is on and
1822 operand is a signaling NaN. */
1823 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1824 return 0;
1825
1826 d = real_value_truncate (mode, d);
1827 return const_double_from_real_value (d, mode);
1828 }
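/* E.g. (float:DF (const_int 5)) folds to the DFmode constant 5.0;
   UNSIGNED_FLOAT differs only in reading the operand as an
   unsigned value. */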
1829
1830 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1831 {
1832 unsigned int width = GET_MODE_PRECISION (result_mode);
1833 wide_int result;
1834 scalar_int_mode imode = (op_mode == VOIDmode
1835 ? result_mode
1836 : as_a <scalar_int_mode> (op_mode));
1837 rtx_mode_t op0 = rtx_mode_t (op, imode);
1838 int int_value;
1839
1840 #if TARGET_SUPPORTS_WIDE_INT == 0
1841 /* This assert keeps the simplification from producing a result
1842 that cannot be represented in a CONST_DOUBLE. A lot of
1843 upstream callers expect that this function never fails to
1844 simplify something, so if you added this to the test
1845 above, the code would die later anyway. If this assert
1846 fires, you just need to make the port support wide int. */
1847 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1848 #endif
1849
1850 switch (code)
1851 {
1852 case NOT:
1853 result = wi::bit_not (op0);
1854 break;
1855
1856 case NEG:
1857 result = wi::neg (op0);
1858 break;
1859
1860 case ABS:
1861 result = wi::abs (op0);
1862 break;
1863
1864 case FFS:
1865 result = wi::shwi (wi::ffs (op0), result_mode);
1866 break;
1867
1868 case CLZ:
1869 if (wi::ne_p (op0, 0))
1870 int_value = wi::clz (op0);
1871 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1872 return NULL_RTX;
1873 result = wi::shwi (int_value, result_mode);
1874 break;
1875
1876 case CLRSB:
1877 result = wi::shwi (wi::clrsb (op0), result_mode);
1878 break;
1879
1880 case CTZ:
1881 if (wi::ne_p (op0, 0))
1882 int_value = wi::ctz (op0);
1883 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1884 return NULL_RTX;
1885 result = wi::shwi (int_value, result_mode);
1886 break;
1887
1888 case POPCOUNT:
1889 result = wi::shwi (wi::popcount (op0), result_mode);
1890 break;
1891
1892 case PARITY:
1893 result = wi::shwi (wi::parity (op0), result_mode);
1894 break;
1895
1896 case BSWAP:
1897 result = wide_int (op0).bswap ();
1898 break;
1899
1900 case TRUNCATE:
1901 case ZERO_EXTEND:
1902 result = wide_int::from (op0, width, UNSIGNED);
1903 break;
1904
1905 case SIGN_EXTEND:
1906 result = wide_int::from (op0, width, SIGNED);
1907 break;
1908
1909 case SQRT:
1910 default:
1911 return 0;
1912 }
1913
1914 return immed_wide_int_const (result, result_mode);
1915 }
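/* Concrete cases of the above: (popcount:SI (const_int 0xff))
   folds to (const_int 8) and (bswap:SI (const_int 0x12345678))
   folds to (const_int 0x78563412). */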
1916
1917 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1918 && SCALAR_FLOAT_MODE_P (mode)
1919 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1920 {
1921 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1922 switch (code)
1923 {
1924 case SQRT:
1925 return 0;
1926 case ABS:
1927 d = real_value_abs (&d);
1928 break;
1929 case NEG:
1930 d = real_value_negate (&d);
1931 break;
1932 case FLOAT_TRUNCATE:
1933 /* Don't perform the operation if flag_signaling_nans is on
1934 and the operand is a signaling NaN. */
1935 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1936 return NULL_RTX;
1937 d = real_value_truncate (mode, d);
1938 break;
1939 case FLOAT_EXTEND:
1940 /* Don't perform the operation if flag_signaling_nans is on
1941 and the operand is a signaling NaN. */
1942 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1943 return NULL_RTX;
1944 /* All this does is change the mode, unless changing
1945 mode class. */
1946 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1947 real_convert (&d, mode, &d);
1948 break;
1949 case FIX:
1950 /* Don't perform the operation if flag_signaling_nans is on
1951 and the operand is a signaling NaN. */
1952 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1953 return NULL_RTX;
1954 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1955 break;
1956 case NOT:
1957 {
1958 long tmp[4];
1959 int i;
1960
1961 real_to_target (tmp, &d, GET_MODE (op));
1962 for (i = 0; i < 4; i++)
1963 tmp[i] = ~tmp[i];
1964 real_from_target (&d, tmp, mode);
1965 break;
1966 }
1967 default:
1968 gcc_unreachable ();
1969 }
1970 return const_double_from_real_value (d, mode);
1971 }
1972 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1973 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1974 && is_int_mode (mode, &result_mode))
1975 {
1976 unsigned int width = GET_MODE_PRECISION (result_mode);
1977 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1978 operators are intentionally left unspecified (to ease implementation
1979 by target backends), for consistency, this routine implements the
1980 same semantics for constant folding as used by the middle-end. */
1981
1982 /* This was formerly used only for non-IEEE float.
1983 eggert@twinsun.com says it is safe for IEEE also. */
1984 REAL_VALUE_TYPE t;
1985 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1986 wide_int wmax, wmin;
1987 /* This is part of the ABI of real_to_integer, but we check
1988 things before making this call. */
1989 bool fail;
1990
1991 switch (code)
1992 {
1993 case FIX:
1994 if (REAL_VALUE_ISNAN (*x))
1995 return const0_rtx;
1996
1997 /* Test against the signed upper bound. */
1998 wmax = wi::max_value (width, SIGNED);
1999 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2000 if (real_less (&t, x))
2001 return immed_wide_int_const (wmax, mode);
2002
2003 /* Test against the signed lower bound. */
2004 wmin = wi::min_value (width, SIGNED);
2005 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2006 if (real_less (x, &t))
2007 return immed_wide_int_const (wmin, mode);
2008
2009 return immed_wide_int_const (real_to_integer (x, &fail, width),
2010 mode);
2011
2012 case UNSIGNED_FIX:
2013 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2014 return const0_rtx;
2015
2016 /* Test against the unsigned upper bound. */
2017 wmax = wi::max_value (width, UNSIGNED);
2018 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2019 if (real_less (&t, x))
2020 return immed_wide_int_const (wmax, mode);
2021
2022 return immed_wide_int_const (real_to_integer (x, &fail, width),
2023 mode);
2024
2025 default:
2026 gcc_unreachable ();
2027 }
2028 }
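/* For instance, applying UNSIGNED_FIX to 1.0e20 in SImode should
   saturate to the unsigned maximum 0xffffffff, while FIX of a NaN
   gives zero, matching the checks above. */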
2029
2030 /* Handle polynomial integers. */
2031 else if (CONST_POLY_INT_P (op))
2032 {
2033 poly_wide_int result;
2034 switch (code)
2035 {
2036 case NEG:
2037 result = -const_poly_int_value (op);
2038 break;
2039
2040 case NOT:
2041 result = ~const_poly_int_value (op);
2042 break;
2043
2044 default:
2045 return NULL_RTX;
2046 }
2047 return immed_wide_int_const (result, mode);
2048 }
2049
2050 return NULL_RTX;
2051 }
2052 \f
2053 /* Subroutine of simplify_binary_operation to simplify a binary operation
2054 CODE that can commute with byte swapping, with result mode MODE and
2055 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2056 Return zero if no simplification or canonicalization is possible. */
2057
2058 static rtx
2059 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2060 rtx op0, rtx op1)
2061 {
2062 rtx tem;
2063
2064 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 = bswap (C1). */
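/* E.g. (and:SI (bswap:SI X) (const_int 0xff)) becomes
   (bswap:SI (and:SI X (const_int 0xff000000))). */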
2065 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2066 {
2067 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2068 simplify_gen_unary (BSWAP, mode, op1, mode));
2069 return simplify_gen_unary (BSWAP, mode, tem, mode);
2070 }
2071
2072 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2073 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2074 {
2075 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2076 return simplify_gen_unary (BSWAP, mode, tem, mode);
2077 }
2078
2079 return NULL_RTX;
2080 }
2081
2082 /* Subroutine of simplify_binary_operation to simplify a commutative,
2083 associative binary operation CODE with result mode MODE, operating
2084 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2085 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2086 canonicalization is possible. */
2087
2088 static rtx
2089 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2090 rtx op0, rtx op1)
2091 {
2092 rtx tem;
2093
2094 /* Linearize the operator to the left. */
2095 if (GET_CODE (op1) == code)
2096 {
2097 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
2098 if (GET_CODE (op0) == code)
2099 {
2100 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2101 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2102 }
2103
2104 /* "a op (b op c)" becomes "(b op c) op a". */
2105 if (! swap_commutative_operands_p (op1, op0))
2106 return simplify_gen_binary (code, mode, op1, op0);
2107
2108 std::swap (op0, op1);
2109 }
2110
2111 if (GET_CODE (op0) == code)
2112 {
2113 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
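/* E.g. (plus (plus X (const_int 4)) Y) becomes
   (plus (plus X Y) (const_int 4)). */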
2114 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2115 {
2116 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2117 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2118 }
2119
2120 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2121 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2122 if (tem != 0)
2123 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2124
2125 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2126 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2127 if (tem != 0)
2128 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2129 }
2130
2131 return 0;
2132 }
2133
2134
2135 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2136 and OP1. Return 0 if no simplification is possible.
2137
2138 Don't use this for relational operations such as EQ or LT.
2139 Use simplify_relational_operation instead. */
2140 rtx
2141 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2142 rtx op0, rtx op1)
2143 {
2144 rtx trueop0, trueop1;
2145 rtx tem;
2146
2147 /* Relational operations don't work here. We must know the mode
2148 of the operands in order to do the comparison correctly.
2149 Assuming a full word can give incorrect results.
2150 Consider comparing 128 with -128 in QImode. */
2151 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2152 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2153
2154 /* Make sure the constant is second. */
2155 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2156 && swap_commutative_operands_p (op0, op1))
2157 std::swap (op0, op1);
2158
2159 trueop0 = avoid_constant_pool_reference (op0);
2160 trueop1 = avoid_constant_pool_reference (op1);
2161
2162 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2163 if (tem)
2164 return tem;
2165 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2166
2167 if (tem)
2168 return tem;
2169
2170 /* If the above steps did not result in a simplification and op0 or op1
2171 were constant pool references, use the referenced constants directly. */
2172 if (trueop0 != op0 || trueop1 != op1)
2173 return simplify_gen_binary (code, mode, trueop0, trueop1);
2174
2175 return NULL_RTX;
2176 }
2177
2178 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2179 which OP0 and OP1 are both vector series or vector duplicates
2180 (which are really just series with a step of 0). If so, try to
2181 form a new series by applying CODE to the bases and to the steps.
2182 Return null if no simplification is possible.
2183
2184 MODE is the mode of the operation and is known to be a vector
2185 integer mode. */
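/* For example, (plus:V (vec_series:V (const_int 0) (const_int 1))
   (vec_duplicate:V (const_int 3)))
   should fold to the series with base 3 and step 1. */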
2186
2187 static rtx
2188 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2189 rtx op0, rtx op1)
2190 {
2191 rtx base0, step0;
2192 if (vec_duplicate_p (op0, &base0))
2193 step0 = const0_rtx;
2194 else if (!vec_series_p (op0, &base0, &step0))
2195 return NULL_RTX;
2196
2197 rtx base1, step1;
2198 if (vec_duplicate_p (op1, &base1))
2199 step1 = const0_rtx;
2200 else if (!vec_series_p (op1, &base1, &step1))
2201 return NULL_RTX;
2202
2203 /* Only create a new series if we can simplify both parts. In other
2204 cases this isn't really a simplification, and it's not necessarily
2205 a win to replace a vector operation with a scalar operation. */
2206 scalar_mode inner_mode = GET_MODE_INNER (mode);
2207 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2208 if (!new_base)
2209 return NULL_RTX;
2210
2211 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2212 if (!new_step)
2213 return NULL_RTX;
2214
2215 return gen_vec_series (mode, new_base, new_step);
2216 }
2217
2218 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2219 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2220 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2221 actual constants. */
2222
2223 static rtx
2224 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2225 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2226 {
2227 rtx tem, reversed, opleft, opright, elt0, elt1;
2228 HOST_WIDE_INT val;
2229 scalar_int_mode int_mode, inner_mode;
2230 poly_int64 offset;
2231
2232 /* Even if we can't compute a constant result,
2233 there are some cases worth simplifying. */
2234
2235 switch (code)
2236 {
2237 case PLUS:
2238 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2239 when x is NaN, infinite, or finite and nonzero. They aren't
2240 when x is -0 and the rounding mode is not towards -infinity,
2241 since (-0) + 0 is then 0. */
2242 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2243 return op0;
2244
2245 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2246 transformations are safe even for IEEE. */
2247 if (GET_CODE (op0) == NEG)
2248 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2249 else if (GET_CODE (op1) == NEG)
2250 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2251
2252 /* (~a) + 1 -> -a */
2253 if (INTEGRAL_MODE_P (mode)
2254 && GET_CODE (op0) == NOT
2255 && trueop1 == const1_rtx)
2256 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2257
2258 /* Handle both-operands-constant cases. We can only add
2259 CONST_INTs to constants since the sum of relocatable symbols
2260 can't be handled by most assemblers. Don't add CONST_INT
2261 to CONST_INT since overflow won't be computed properly if wider
2262 than HOST_BITS_PER_WIDE_INT. */
2263
2264 if ((GET_CODE (op0) == CONST
2265 || GET_CODE (op0) == SYMBOL_REF
2266 || GET_CODE (op0) == LABEL_REF)
2267 && poly_int_rtx_p (op1, &offset))
2268 return plus_constant (mode, op0, offset);
2269 else if ((GET_CODE (op1) == CONST
2270 || GET_CODE (op1) == SYMBOL_REF
2271 || GET_CODE (op1) == LABEL_REF)
2272 && poly_int_rtx_p (op0, &offset))
2273 return plus_constant (mode, op1, offset);
2274
2275 /* See if this is something like X * C - X or vice versa or
2276 if the multiplication is written as a shift. If so, we can
2277 distribute and make a new multiply, shift, or maybe just
2278 have X (if C is 2 in the example above). But don't make
2279 something more expensive than we had before. */
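/* For instance, (plus (mult X (const_int 3)) X) can become
   (mult X (const_int 4)) here, subject to the cost check below. */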
2280
2281 if (is_a <scalar_int_mode> (mode, &int_mode))
2282 {
2283 rtx lhs = op0, rhs = op1;
2284
2285 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2286 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2287
2288 if (GET_CODE (lhs) == NEG)
2289 {
2290 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2291 lhs = XEXP (lhs, 0);
2292 }
2293 else if (GET_CODE (lhs) == MULT
2294 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2295 {
2296 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2297 lhs = XEXP (lhs, 0);
2298 }
2299 else if (GET_CODE (lhs) == ASHIFT
2300 && CONST_INT_P (XEXP (lhs, 1))
2301 && INTVAL (XEXP (lhs, 1)) >= 0
2302 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2303 {
2304 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2305 GET_MODE_PRECISION (int_mode));
2306 lhs = XEXP (lhs, 0);
2307 }
2308
2309 if (GET_CODE (rhs) == NEG)
2310 {
2311 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2312 rhs = XEXP (rhs, 0);
2313 }
2314 else if (GET_CODE (rhs) == MULT
2315 && CONST_INT_P (XEXP (rhs, 1)))
2316 {
2317 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2318 rhs = XEXP (rhs, 0);
2319 }
2320 else if (GET_CODE (rhs) == ASHIFT
2321 && CONST_INT_P (XEXP (rhs, 1))
2322 && INTVAL (XEXP (rhs, 1)) >= 0
2323 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2324 {
2325 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2326 GET_MODE_PRECISION (int_mode));
2327 rhs = XEXP (rhs, 0);
2328 }
2329
2330 if (rtx_equal_p (lhs, rhs))
2331 {
2332 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2333 rtx coeff;
2334 bool speed = optimize_function_for_speed_p (cfun);
2335
2336 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2337
2338 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2339 return (set_src_cost (tem, int_mode, speed)
2340 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2341 }
2342 }
2343
2344 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2345 if (CONST_SCALAR_INT_P (op1)
2346 && GET_CODE (op0) == XOR
2347 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2348 && mode_signbit_p (mode, op1))
2349 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2350 simplify_gen_binary (XOR, mode, op1,
2351 XEXP (op0, 1)));
2352
2353 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2354 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2355 && GET_CODE (op0) == MULT
2356 && GET_CODE (XEXP (op0, 0)) == NEG)
2357 {
2358 rtx in1, in2;
2359
2360 in1 = XEXP (XEXP (op0, 0), 0);
2361 in2 = XEXP (op0, 1);
2362 return simplify_gen_binary (MINUS, mode, op1,
2363 simplify_gen_binary (MULT, mode,
2364 in1, in2));
2365 }
2366
2367 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2368 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2369 is 1. */
2370 if (COMPARISON_P (op0)
2371 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2372 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2373 && (reversed = reversed_comparison (op0, mode)))
2374 return
2375 simplify_gen_unary (NEG, mode, reversed, mode);
2376
2377 /* If one of the operands is a PLUS or a MINUS, see if we can
2378 simplify this by the associative law.
2379 Don't use the associative law for floating point.
2380 The inaccuracy makes it nonassociative,
2381 and subtle programs can break if operations are associated. */
2382
2383 if (INTEGRAL_MODE_P (mode)
2384 && (plus_minus_operand_p (op0)
2385 || plus_minus_operand_p (op1))
2386 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2387 return tem;
2388
2389 /* Reassociate floating point addition only when the user
2390 specifies associative math operations. */
2391 if (FLOAT_MODE_P (mode)
2392 && flag_associative_math)
2393 {
2394 tem = simplify_associative_operation (code, mode, op0, op1);
2395 if (tem)
2396 return tem;
2397 }
2398
2399 /* Handle vector series. */
2400 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2401 {
2402 tem = simplify_binary_operation_series (code, mode, op0, op1);
2403 if (tem)
2404 return tem;
2405 }
2406 break;
2407
2408 case COMPARE:
2409 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2410 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2411 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2412 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2413 {
2414 rtx xop00 = XEXP (op0, 0);
2415 rtx xop10 = XEXP (op1, 0);
2416
2417 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2418 return xop00;
2419
2420 if (REG_P (xop00) && REG_P (xop10)
2421 && REGNO (xop00) == REGNO (xop10)
2422 && GET_MODE (xop00) == mode
2423 && GET_MODE (xop10) == mode
2424 && GET_MODE_CLASS (mode) == MODE_CC)
2425 return xop00;
2426 }
2427 break;
2428
2429 case MINUS:
2430 /* We can't assume x-x is 0 even with non-IEEE floating point,
2431 but since it is zero except in very strange circumstances, we
2432 will treat it as zero with -ffinite-math-only. */
2433 if (rtx_equal_p (trueop0, trueop1)
2434 && ! side_effects_p (op0)
2435 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2436 return CONST0_RTX (mode);
2437
2438 /* Change subtraction from zero into negation. (0 - x) is the
2439 same as -x when x is NaN, infinite, or finite and nonzero.
2440 But if the mode has signed zeros, and does not round towards
2441 -infinity, then 0 - 0 is 0, not -0. */
2442 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2443 return simplify_gen_unary (NEG, mode, op1, mode);
2444
2445 /* (-1 - a) is ~a, unless the expression contains symbolic
2446 constants, in which case not retaining additions and
2447 subtractions could cause invalid assembly to be produced. */
2448 if (trueop0 == constm1_rtx
2449 && !contains_symbolic_reference_p (op1))
2450 return simplify_gen_unary (NOT, mode, op1, mode);
2451
2452 /* Subtracting 0 has no effect unless the mode has signed zeros
2453 and supports rounding towards -infinity. In such a case,
2454 0 - 0 is -0. */
2455 if (!(HONOR_SIGNED_ZEROS (mode)
2456 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2457 && trueop1 == CONST0_RTX (mode))
2458 return op0;
2459
2460 /* See if this is something like X * C - X or vice versa or
2461 if the multiplication is written as a shift. If so, we can
2462 distribute and make a new multiply, shift, or maybe just
2463 have X (if C is 2 in the example above). But don't make
2464 something more expensive than we had before. */
2465
2466 if (is_a <scalar_int_mode> (mode, &int_mode))
2467 {
2468 rtx lhs = op0, rhs = op1;
2469
2470 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2471 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2472
2473 if (GET_CODE (lhs) == NEG)
2474 {
2475 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2476 lhs = XEXP (lhs, 0);
2477 }
2478 else if (GET_CODE (lhs) == MULT
2479 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2480 {
2481 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2482 lhs = XEXP (lhs, 0);
2483 }
2484 else if (GET_CODE (lhs) == ASHIFT
2485 && CONST_INT_P (XEXP (lhs, 1))
2486 && INTVAL (XEXP (lhs, 1)) >= 0
2487 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2488 {
2489 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2490 GET_MODE_PRECISION (int_mode));
2491 lhs = XEXP (lhs, 0);
2492 }
2493
2494 if (GET_CODE (rhs) == NEG)
2495 {
2496 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2497 rhs = XEXP (rhs, 0);
2498 }
2499 else if (GET_CODE (rhs) == MULT
2500 && CONST_INT_P (XEXP (rhs, 1)))
2501 {
2502 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2503 rhs = XEXP (rhs, 0);
2504 }
2505 else if (GET_CODE (rhs) == ASHIFT
2506 && CONST_INT_P (XEXP (rhs, 1))
2507 && INTVAL (XEXP (rhs, 1)) >= 0
2508 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2509 {
2510 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2511 GET_MODE_PRECISION (int_mode));
2512 negcoeff1 = -negcoeff1;
2513 rhs = XEXP (rhs, 0);
2514 }
2515
2516 if (rtx_equal_p (lhs, rhs))
2517 {
2518 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2519 rtx coeff;
2520 bool speed = optimize_function_for_speed_p (cfun);
2521
2522 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2523
2524 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2525 return (set_src_cost (tem, int_mode, speed)
2526 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2527 }
2528 }
2529
2530 /* (a - (-b)) -> (a + b). True even for IEEE. */
2531 if (GET_CODE (op1) == NEG)
2532 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2533
2534 /* (-x - c) may be simplified as (-c - x). */
2535 if (GET_CODE (op0) == NEG
2536 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2537 {
2538 tem = simplify_unary_operation (NEG, mode, op1, mode);
2539 if (tem)
2540 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2541 }
2542
2543 if ((GET_CODE (op0) == CONST
2544 || GET_CODE (op0) == SYMBOL_REF
2545 || GET_CODE (op0) == LABEL_REF)
2546 && poly_int_rtx_p (op1, &offset))
2547 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2548
2549 /* Don't let a relocatable value get a negative coeff. */
2550 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2551 return simplify_gen_binary (PLUS, mode,
2552 op0,
2553 neg_const_int (mode, op1));
2554
2555 /* (x - (x & y)) -> (x & ~y) */
2556 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2557 {
2558 if (rtx_equal_p (op0, XEXP (op1, 0)))
2559 {
2560 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2561 GET_MODE (XEXP (op1, 1)));
2562 return simplify_gen_binary (AND, mode, op0, tem);
2563 }
2564 if (rtx_equal_p (op0, XEXP (op1, 1)))
2565 {
2566 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2567 GET_MODE (XEXP (op1, 0)));
2568 return simplify_gen_binary (AND, mode, op0, tem);
2569 }
2570 }
2571
2572 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2573 by reversing the comparison code if valid. */
2574 if (STORE_FLAG_VALUE == 1
2575 && trueop0 == const1_rtx
2576 && COMPARISON_P (op1)
2577 && (reversed = reversed_comparison (op1, mode)))
2578 return reversed;
2579
2580 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2581 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2582 && GET_CODE (op1) == MULT
2583 && GET_CODE (XEXP (op1, 0)) == NEG)
2584 {
2585 rtx in1, in2;
2586
2587 in1 = XEXP (XEXP (op1, 0), 0);
2588 in2 = XEXP (op1, 1);
2589 return simplify_gen_binary (PLUS, mode,
2590 simplify_gen_binary (MULT, mode,
2591 in1, in2),
2592 op0);
2593 }
2594
2595 /* Canonicalize (minus (neg A) (mult B C)) to
2596 (minus (mult (neg B) C) A). */
2597 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2598 && GET_CODE (op1) == MULT
2599 && GET_CODE (op0) == NEG)
2600 {
2601 rtx in1, in2;
2602
2603 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2604 in2 = XEXP (op1, 1);
2605 return simplify_gen_binary (MINUS, mode,
2606 simplify_gen_binary (MULT, mode,
2607 in1, in2),
2608 XEXP (op0, 0));
2609 }
2610
2611 /* If one of the operands is a PLUS or a MINUS, see if we can
2612 simplify this by the associative law. This will, for example,
2613 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2614 Don't use the associative law for floating point.
2615 The inaccuracy makes it nonassociative,
2616 and subtle programs can break if operations are associated. */
2617
2618 if (INTEGRAL_MODE_P (mode)
2619 && (plus_minus_operand_p (op0)
2620 || plus_minus_operand_p (op1))
2621 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2622 return tem;
2623
2624 /* Handle vector series. */
2625 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2626 {
2627 tem = simplify_binary_operation_series (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 }
2631 break;
2632
2633 case MULT:
2634 if (trueop1 == constm1_rtx)
2635 return simplify_gen_unary (NEG, mode, op0, mode);
2636
2637 if (GET_CODE (op0) == NEG)
2638 {
2639 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2640 /* If op1 is a MULT as well and simplify_unary_operation
2641 just moved the NEG to the second operand, simplify_gen_binary
2642 below could, through simplify_associative_operation, move
2643 the NEG around again and recurse endlessly. */
2644 if (temp
2645 && GET_CODE (op1) == MULT
2646 && GET_CODE (temp) == MULT
2647 && XEXP (op1, 0) == XEXP (temp, 0)
2648 && GET_CODE (XEXP (temp, 1)) == NEG
2649 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2650 temp = NULL_RTX;
2651 if (temp)
2652 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2653 }
2654 if (GET_CODE (op1) == NEG)
2655 {
2656 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2657 /* If op0 is a MULT as well and simplify_unary_operation
2658 just moved the NEG to the second operand, simplify_gen_binary
2659 below could, through simplify_associative_operation, move
2660 the NEG around again and recurse endlessly. */
2661 if (temp
2662 && GET_CODE (op0) == MULT
2663 && GET_CODE (temp) == MULT
2664 && XEXP (op0, 0) == XEXP (temp, 0)
2665 && GET_CODE (XEXP (temp, 1)) == NEG
2666 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2667 temp = NULL_RTX;
2668 if (temp)
2669 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2670 }
2671
2672 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2673 x is NaN, since x * 0 is then also NaN. Nor is it valid
2674 when the mode has signed zeros, since multiplying a negative
2675 number by 0 will give -0, not 0. */
2676 if (!HONOR_NANS (mode)
2677 && !HONOR_SIGNED_ZEROS (mode)
2678 && trueop1 == CONST0_RTX (mode)
2679 && ! side_effects_p (op0))
2680 return op1;
2681
2682 /* In IEEE floating point, x*1 is not equivalent to x for
2683 signalling NaNs. */
2684 if (!HONOR_SNANS (mode)
2685 && trueop1 == CONST1_RTX (mode))
2686 return op0;
2687
2688 /* Convert multiply by constant power of two into shift. */
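/* E.g. (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)). */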
2689 if (CONST_SCALAR_INT_P (trueop1))
2690 {
2691 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2692 if (val >= 0)
2693 return simplify_gen_binary (ASHIFT, mode, op0,
2694 gen_int_shift_amount (mode, val));
2695 }
2696
2697 /* x*2 is x+x and x*(-1) is -x */
2698 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2699 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2700 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2701 && GET_MODE (op0) == mode)
2702 {
2703 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2704
2705 if (real_equal (d1, &dconst2))
2706 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2707
2708 if (!HONOR_SNANS (mode)
2709 && real_equal (d1, &dconstm1))
2710 return simplify_gen_unary (NEG, mode, op0, mode);
2711 }
2712
2713 /* Optimize -x * -x as x * x. */
2714 if (FLOAT_MODE_P (mode)
2715 && GET_CODE (op0) == NEG
2716 && GET_CODE (op1) == NEG
2717 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2718 && !side_effects_p (XEXP (op0, 0)))
2719 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2720
2721 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2722 if (SCALAR_FLOAT_MODE_P (mode)
2723 && GET_CODE (op0) == ABS
2724 && GET_CODE (op1) == ABS
2725 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2726 && !side_effects_p (XEXP (op0, 0)))
2727 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2728
2729 /* Reassociate multiplication, but for floating point MULTs
2730 only when the user specifies unsafe math optimizations. */
2731 if (! FLOAT_MODE_P (mode)
2732 || flag_unsafe_math_optimizations)
2733 {
2734 tem = simplify_associative_operation (code, mode, op0, op1);
2735 if (tem)
2736 return tem;
2737 }
2738 break;
2739
2740 case IOR:
2741 if (trueop1 == CONST0_RTX (mode))
2742 return op0;
2743 if (INTEGRAL_MODE_P (mode)
2744 && trueop1 == CONSTM1_RTX (mode)
2745 && !side_effects_p (op0))
2746 return op1;
2747 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2748 return op0;
2749 /* A | (~A) -> -1 */
2750 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2751 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2752 && ! side_effects_p (op0)
2753 && SCALAR_INT_MODE_P (mode))
2754 return constm1_rtx;
2755
2756 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2757 if (CONST_INT_P (op1)
2758 && HWI_COMPUTABLE_MODE_P (mode)
2759 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2760 && !side_effects_p (op0))
2761 return op1;
2762
2763 /* Canonicalize (X & C1) | C2. */
2764 if (GET_CODE (op0) == AND
2765 && CONST_INT_P (trueop1)
2766 && CONST_INT_P (XEXP (op0, 1)))
2767 {
2768 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2769 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2770 HOST_WIDE_INT c2 = INTVAL (trueop1);
2771
2772 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2773 if ((c1 & c2) == c1
2774 && !side_effects_p (XEXP (op0, 0)))
2775 return trueop1;
2776
2777 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2778 if (((c1|c2) & mask) == mask)
2779 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2780 }
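/* For example, in QImode with C1 = 0x0f and C2 = 0xf0, C1|C2
   covers every bit of the mode, so ((X & C1) | C2) becomes
   (X | C2). */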
2781
2782 /* Convert (A & B) | A to A. */
2783 if (GET_CODE (op0) == AND
2784 && (rtx_equal_p (XEXP (op0, 0), op1)
2785 || rtx_equal_p (XEXP (op0, 1), op1))
2786 && ! side_effects_p (XEXP (op0, 0))
2787 && ! side_effects_p (XEXP (op0, 1)))
2788 return op1;
2789
2790 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2791 mode size to (rotate A CX). */
2792
2793 if (GET_CODE (op1) == ASHIFT
2794 || GET_CODE (op1) == SUBREG)
2795 {
2796 opleft = op1;
2797 opright = op0;
2798 }
2799 else
2800 {
2801 opright = op1;
2802 opleft = op0;
2803 }
2804
2805 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2806 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2807 && CONST_INT_P (XEXP (opleft, 1))
2808 && CONST_INT_P (XEXP (opright, 1))
2809 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2810 == GET_MODE_UNIT_PRECISION (mode)))
2811 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2812
2813 /* Same, but for ashift that has been "simplified" to a wider mode
2814 by simplify_shift_const. */
2815
2816 if (GET_CODE (opleft) == SUBREG
2817 && is_a <scalar_int_mode> (mode, &int_mode)
2818 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2819 &inner_mode)
2820 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2821 && GET_CODE (opright) == LSHIFTRT
2822 && GET_CODE (XEXP (opright, 0)) == SUBREG
2823 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2824 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2825 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2826 SUBREG_REG (XEXP (opright, 0)))
2827 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2828 && CONST_INT_P (XEXP (opright, 1))
2829 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2830 + INTVAL (XEXP (opright, 1))
2831 == GET_MODE_PRECISION (int_mode)))
2832 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2833 XEXP (SUBREG_REG (opleft), 1));
2834
2835 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2836 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2837 the PLUS does not affect any of the bits in OP1: then we can do
2838 the IOR as a PLUS and we can associate. This is valid if OP1
2839 can be safely shifted left C bits. */
2840 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2841 && GET_CODE (XEXP (op0, 0)) == PLUS
2842 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2843 && CONST_INT_P (XEXP (op0, 1))
2844 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2845 {
2846 int count = INTVAL (XEXP (op0, 1));
2847 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2848
2849 if (mask >> count == INTVAL (trueop1)
2850 && trunc_int_for_mode (mask, mode) == mask
2851 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2852 return simplify_gen_binary (ASHIFTRT, mode,
2853 plus_constant (mode, XEXP (op0, 0),
2854 mask),
2855 XEXP (op0, 1));
2856 }
2857
2858 /* The following happens with bitfield merging.
2859 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2860 if (GET_CODE (op0) == AND
2861 && GET_CODE (op1) == AND
2862 && CONST_INT_P (XEXP (op0, 1))
2863 && CONST_INT_P (XEXP (op1, 1))
2864 && (INTVAL (XEXP (op0, 1))
2865 == ~INTVAL (XEXP (op1, 1))))
2866 {
2867 /* The IOR may be on both sides. */
2868 rtx top0 = NULL_RTX, top1 = NULL_RTX;
2869 if (GET_CODE (XEXP (op1, 0)) == IOR)
2870 top0 = op0, top1 = op1;
2871 else if (GET_CODE (XEXP (op0, 0)) == IOR)
2872 top0 = op1, top1 = op0;
2873 if (top0 && top1)
2874 {
2875 /* X may be on either side of the inner IOR. */
2876 rtx tem = NULL_RTX;
2877 if (rtx_equal_p (XEXP (top0, 0),
2878 XEXP (XEXP (top1, 0), 0)))
2879 tem = XEXP (XEXP (top1, 0), 1);
2880 else if (rtx_equal_p (XEXP (top0, 0),
2881 XEXP (XEXP (top1, 0), 1)))
2882 tem = XEXP (XEXP (top1, 0), 0);
2883 if (tem)
2884 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
2885 simplify_gen_binary
2886 (AND, mode, tem, XEXP (top1, 1)));
2887 }
2888 }
2889
2890 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2891 if (tem)
2892 return tem;
2893
2894 tem = simplify_associative_operation (code, mode, op0, op1);
2895 if (tem)
2896 return tem;
2897 break;
2898
2899 case XOR:
2900 if (trueop1 == CONST0_RTX (mode))
2901 return op0;
2902 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2903 return simplify_gen_unary (NOT, mode, op0, mode);
2904 if (rtx_equal_p (trueop0, trueop1)
2905 && ! side_effects_p (op0)
2906 && GET_MODE_CLASS (mode) != MODE_CC)
2907 return CONST0_RTX (mode);
2908
2909 /* Canonicalize XOR of the most significant bit to PLUS. */
2910 if (CONST_SCALAR_INT_P (op1)
2911 && mode_signbit_p (mode, op1))
2912 return simplify_gen_binary (PLUS, mode, op0, op1);
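/* E.g. in QImode, (xor X (const_int -128)) and
   (plus X (const_int -128)) are equivalent: adding the sign bit
   just flips it, and any carry out of the top bit is lost. */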
2913 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2914 if (CONST_SCALAR_INT_P (op1)
2915 && GET_CODE (op0) == PLUS
2916 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2917 && mode_signbit_p (mode, XEXP (op0, 1)))
2918 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2919 simplify_gen_binary (XOR, mode, op1,
2920 XEXP (op0, 1)));
2921
2922 /* If we are XORing two things that have no bits in common,
2923 convert them into an IOR. This helps to detect rotation encoded
2924 using those methods and possibly other simplifications. */
2925
2926 if (HWI_COMPUTABLE_MODE_P (mode)
2927 && (nonzero_bits (op0, mode)
2928 & nonzero_bits (op1, mode)) == 0)
2929 return (simplify_gen_binary (IOR, mode, op0, op1));
2930
2931 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2932 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2933 (NOT y). */
2934 {
2935 int num_negated = 0;
2936
2937 if (GET_CODE (op0) == NOT)
2938 num_negated++, op0 = XEXP (op0, 0);
2939 if (GET_CODE (op1) == NOT)
2940 num_negated++, op1 = XEXP (op1, 0);
2941
2942 if (num_negated == 2)
2943 return simplify_gen_binary (XOR, mode, op0, op1);
2944 else if (num_negated == 1)
2945 return simplify_gen_unary (NOT, mode,
2946 simplify_gen_binary (XOR, mode, op0, op1),
2947 mode);
2948 }
2949
2950 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2951 correspond to a machine insn or result in further simplifications
2952 if B is a constant. */
2953
2954 if (GET_CODE (op0) == AND
2955 && rtx_equal_p (XEXP (op0, 1), op1)
2956 && ! side_effects_p (op1))
2957 return simplify_gen_binary (AND, mode,
2958 simplify_gen_unary (NOT, mode,
2959 XEXP (op0, 0), mode),
2960 op1);
2961
2962 else if (GET_CODE (op0) == AND
2963 && rtx_equal_p (XEXP (op0, 0), op1)
2964 && ! side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode,
2966 simplify_gen_unary (NOT, mode,
2967 XEXP (op0, 1), mode),
2968 op1);
2969
2970 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2971 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2972 out bits inverted twice and not set by C. Similarly, given
2973 (xor (and (xor A B) C) D), simplify without inverting C in
2974 the xor operand: (xor (and A C) (B&C)^D).
2975 */
2976 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2977 && GET_CODE (XEXP (op0, 0)) == XOR
2978 && CONST_INT_P (op1)
2979 && CONST_INT_P (XEXP (op0, 1))
2980 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2981 {
2982 enum rtx_code op = GET_CODE (op0);
2983 rtx a = XEXP (XEXP (op0, 0), 0);
2984 rtx b = XEXP (XEXP (op0, 0), 1);
2985 rtx c = XEXP (op0, 1);
2986 rtx d = op1;
2987 HOST_WIDE_INT bval = INTVAL (b);
2988 HOST_WIDE_INT cval = INTVAL (c);
2989 HOST_WIDE_INT dval = INTVAL (d);
2990 HOST_WIDE_INT xcval;
2991
2992 if (op == IOR)
2993 xcval = ~cval;
2994 else
2995 xcval = cval;
2996
2997 return simplify_gen_binary (XOR, mode,
2998 simplify_gen_binary (op, mode, a, c),
2999 gen_int_mode ((bval & xcval) ^ dval,
3000 mode));
3001 }
3002
3003 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3004 we can transform like this:
3005 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3006 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3007 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3008 Attempt a few simplifications when B and C are both constants. */
3009 if (GET_CODE (op0) == AND
3010 && CONST_INT_P (op1)
3011 && CONST_INT_P (XEXP (op0, 1)))
3012 {
3013 rtx a = XEXP (op0, 0);
3014 rtx b = XEXP (op0, 1);
3015 rtx c = op1;
3016 HOST_WIDE_INT bval = INTVAL (b);
3017 HOST_WIDE_INT cval = INTVAL (c);
3018
3019 /* Instead of computing ~A&C, we compute its negated value,
3020 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3021 optimize for sure. If it does not simplify, we still try
3022 to compute ~A&C below, but since that always allocates
3023 RTL, we don't try that before committing to returning a
3024 simplified expression. */
3025 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3026 GEN_INT (~cval));
3027
3028 if ((~cval & bval) == 0)
3029 {
3030 rtx na_c = NULL_RTX;
3031 if (n_na_c)
3032 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3033 else
3034 {
3035 /* If ~A does not simplify, don't bother: we don't
3036 want to simplify 2 operations into 3, and if na_c
3037 were to simplify with na, n_na_c would have
3038 simplified as well. */
3039 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3040 if (na)
3041 na_c = simplify_gen_binary (AND, mode, na, c);
3042 }
3043
3044 /* Try to simplify ~A&C | ~B&C. */
3045 if (na_c != NULL_RTX)
3046 return simplify_gen_binary (IOR, mode, na_c,
3047 gen_int_mode (~bval & cval, mode));
3048 }
3049 else
3050 {
3051 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3052 if (n_na_c == CONSTM1_RTX (mode))
3053 {
3054 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3055 gen_int_mode (~cval & bval,
3056 mode));
3057 return simplify_gen_binary (IOR, mode, a_nc_b,
3058 gen_int_mode (~bval & cval,
3059 mode));
3060 }
3061 }
3062 }
3063
3064 /* If we have (xor (and (xor A B) C) A) with C a constant we can
3065 instead do (ior (and A ~C) (and B C)), which matches a machine
3066 instruction on some targets and has a shorter instruction path. */
3067 if (GET_CODE (op0) == AND
3068 && GET_CODE (XEXP (op0, 0)) == XOR
3069 && CONST_INT_P (XEXP (op0, 1))
3070 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3071 {
3072 rtx a = trueop1;
3073 rtx b = XEXP (XEXP (op0, 0), 1);
3074 rtx c = XEXP (op0, 1);
3075 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3076 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3077 rtx bc = simplify_gen_binary (AND, mode, b, c);
3078 return simplify_gen_binary (IOR, mode, a_nc, bc);
3079 }
3080 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3081 else if (GET_CODE (op0) == AND
3082 && GET_CODE (XEXP (op0, 0)) == XOR
3083 && CONST_INT_P (XEXP (op0, 1))
3084 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3085 {
3086 rtx a = XEXP (XEXP (op0, 0), 0);
3087 rtx b = trueop1;
3088 rtx c = XEXP (op0, 1);
3089 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3090 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3091 rtx ac = simplify_gen_binary (AND, mode, a, c);
3092 return simplify_gen_binary (IOR, mode, ac, b_nc);
3093 }
3094
3095 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3096 comparison if STORE_FLAG_VALUE is 1. */
3097 if (STORE_FLAG_VALUE == 1
3098 && trueop1 == const1_rtx
3099 && COMPARISON_P (op0)
3100 && (reversed = reversed_comparison (op0, mode)))
3101 return reversed;
3102
3103 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3104 is (lt foo (const_int 0)), so we can perform the above
3105 simplification if STORE_FLAG_VALUE is 1. */
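/* E.g. (xor:SI (lshiftrt:SI X (const_int 31)) (const_int 1))
   becomes (ge:SI X (const_int 0)). */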
3106
3107 if (is_a <scalar_int_mode> (mode, &int_mode)
3108 && STORE_FLAG_VALUE == 1
3109 && trueop1 == const1_rtx
3110 && GET_CODE (op0) == LSHIFTRT
3111 && CONST_INT_P (XEXP (op0, 1))
3112 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3113 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3114
3115 /* (xor (comparison foo bar) (const_int sign-bit))
3116 when STORE_FLAG_VALUE is the sign bit. */
3117 if (is_a <scalar_int_mode> (mode, &int_mode)
3118 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3119 && trueop1 == const_true_rtx
3120 && COMPARISON_P (op0)
3121 && (reversed = reversed_comparison (op0, int_mode)))
3122 return reversed;
3123
3124 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3125 if (tem)
3126 return tem;
3127
3128 tem = simplify_associative_operation (code, mode, op0, op1);
3129 if (tem)
3130 return tem;
3131 break;
3132
3133 case AND:
3134 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3135 return trueop1;
3136 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3137 return op0;
3138 if (HWI_COMPUTABLE_MODE_P (mode))
3139 {
3140 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3141 HOST_WIDE_INT nzop1;
3142 if (CONST_INT_P (trueop1))
3143 {
3144 HOST_WIDE_INT val1 = INTVAL (trueop1);
3145 /* If we are turning off bits already known off in OP0, we need
3146 not do an AND. */
3147 if ((nzop0 & ~val1) == 0)
3148 return op0;
3149 }
3150 nzop1 = nonzero_bits (trueop1, mode);
3151 /* If we are clearing all the nonzero bits, the result is zero. */
3152 if ((nzop1 & nzop0) == 0
3153 && !side_effects_p (op0) && !side_effects_p (op1))
3154 return CONST0_RTX (mode);
3155 }
3156 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3157 && GET_MODE_CLASS (mode) != MODE_CC)
3158 return op0;
3159 /* A & (~A) -> 0 */
3160 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3161 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3162 && ! side_effects_p (op0)
3163 && GET_MODE_CLASS (mode) != MODE_CC)
3164 return CONST0_RTX (mode);
3165
3166 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3167 there are no nonzero bits of C outside of X's mode. */
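/* E.g. (and:SI (zero_extend:SI (reg:QI X)) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))). */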
3168 if ((GET_CODE (op0) == SIGN_EXTEND
3169 || GET_CODE (op0) == ZERO_EXTEND)
3170 && CONST_INT_P (trueop1)
3171 && HWI_COMPUTABLE_MODE_P (mode)
3172 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3173 & UINTVAL (trueop1)) == 0)
3174 {
3175 machine_mode imode = GET_MODE (XEXP (op0, 0));
3176 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3177 gen_int_mode (INTVAL (trueop1),
3178 imode));
3179 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3180 }
3181
3182 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3183 we might be able to further simplify the AND with X and potentially
3184 remove the truncation altogether. */
3185 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3186 {
3187 rtx x = XEXP (op0, 0);
3188 machine_mode xmode = GET_MODE (x);
3189 tem = simplify_gen_binary (AND, xmode, x,
3190 gen_int_mode (INTVAL (trueop1), xmode));
3191 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3192 }
3193
3194 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3195 if (GET_CODE (op0) == IOR
3196 && CONST_INT_P (trueop1)
3197 && CONST_INT_P (XEXP (op0, 1)))
3198 {
3199 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3200 return simplify_gen_binary (IOR, mode,
3201 simplify_gen_binary (AND, mode,
3202 XEXP (op0, 0), op1),
3203 gen_int_mode (tmp, mode));
3204 }
3205
3206 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3207 insn (and may simplify more). */
3208 if (GET_CODE (op0) == XOR
3209 && rtx_equal_p (XEXP (op0, 0), op1)
3210 && ! side_effects_p (op1))
3211 return simplify_gen_binary (AND, mode,
3212 simplify_gen_unary (NOT, mode,
3213 XEXP (op0, 1), mode),
3214 op1);
3215
3216 if (GET_CODE (op0) == XOR
3217 && rtx_equal_p (XEXP (op0, 1), op1)
3218 && ! side_effects_p (op1))
3219 return simplify_gen_binary (AND, mode,
3220 simplify_gen_unary (NOT, mode,
3221 XEXP (op0, 0), mode),
3222 op1);
3223
3224 /* Similarly for (~(A ^ B)) & A. */
3225 if (GET_CODE (op0) == NOT
3226 && GET_CODE (XEXP (op0, 0)) == XOR
3227 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3228 && ! side_effects_p (op1))
3229 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3230
3231 if (GET_CODE (op0) == NOT
3232 && GET_CODE (XEXP (op0, 0)) == XOR
3233 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3234 && ! side_effects_p (op1))
3235 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3236
3237 /* Convert (A | B) & A to A. */
3238 if (GET_CODE (op0) == IOR
3239 && (rtx_equal_p (XEXP (op0, 0), op1)
3240 || rtx_equal_p (XEXP (op0, 1), op1))
3241 && ! side_effects_p (XEXP (op0, 0))
3242 && ! side_effects_p (XEXP (op0, 1)))
3243 return op1;
3244
3245 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3246 ((A & N) + B) & M -> (A + B) & M
3247 Similarly if (N & M) == 0,
3248 ((A | N) + B) & M -> (A + B) & M
3249 and for - instead of + and/or ^ instead of |.
3250 Also, if (N & M) == 0, then
3251 (A +- N) & M -> A & M. */
3252 if (CONST_INT_P (trueop1)
3253 && HWI_COMPUTABLE_MODE_P (mode)
3254 && ~UINTVAL (trueop1)
3255 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3256 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3257 {
3258 rtx pmop[2];
3259 int which;
3260
3261 pmop[0] = XEXP (op0, 0);
3262 pmop[1] = XEXP (op0, 1);
3263
3264 if (CONST_INT_P (pmop[1])
3265 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3266 return simplify_gen_binary (AND, mode, pmop[0], op1);
3267
3268 for (which = 0; which < 2; which++)
3269 {
3270 tem = pmop[which];
3271 switch (GET_CODE (tem))
3272 {
3273 case AND:
3274 if (CONST_INT_P (XEXP (tem, 1))
3275 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3276 == UINTVAL (trueop1))
3277 pmop[which] = XEXP (tem, 0);
3278 break;
3279 case IOR:
3280 case XOR:
3281 if (CONST_INT_P (XEXP (tem, 1))
3282 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3283 pmop[which] = XEXP (tem, 0);
3284 break;
3285 default:
3286 break;
3287 }
3288 }
3289
3290 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3291 {
3292 tem = simplify_gen_binary (GET_CODE (op0), mode,
3293 pmop[0], pmop[1]);
3294 return simplify_gen_binary (code, mode, tem, op1);
3295 }
3296 }
3297
3298 /* (and X (ior (not X) Y)) -> (and X Y) */
3299 if (GET_CODE (op1) == IOR
3300 && GET_CODE (XEXP (op1, 0)) == NOT
3301 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3302 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3303
3304 /* (and (ior (not X) Y) X) -> (and X Y) */
3305 if (GET_CODE (op0) == IOR
3306 && GET_CODE (XEXP (op0, 0)) == NOT
3307 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3308 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3309
3310 /* (and X (ior Y (not X))) -> (and X Y) */
3311 if (GET_CODE (op1) == IOR
3312 && GET_CODE (XEXP (op1, 1)) == NOT
3313 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3314 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3315
3316 /* (and (ior Y (not X)) X) -> (and X Y) */
3317 if (GET_CODE (op0) == IOR
3318 && GET_CODE (XEXP (op0, 1)) == NOT
3319 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3320 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3321
3322 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3323 if (tem)
3324 return tem;
3325
3326 tem = simplify_associative_operation (code, mode, op0, op1);
3327 if (tem)
3328 return tem;
3329 break;
3330
3331 case UDIV:
3332 /* 0/x is 0 (or x&0 if x has side-effects). */
3333 if (trueop0 == CONST0_RTX (mode)
3334 && !cfun->can_throw_non_call_exceptions)
3335 {
3336 if (side_effects_p (op1))
3337 return simplify_gen_binary (AND, mode, op1, trueop0);
3338 return trueop0;
3339 }
3340 /* x/1 is x. */
3341 if (trueop1 == CONST1_RTX (mode))
3342 {
3343 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3344 if (tem)
3345 return tem;
3346 }
3347 /* Convert divide by power of two into shift. */
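/* E.g. (udiv:SI X (const_int 8)) becomes
   (lshiftrt:SI X (const_int 3)). */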
3348 if (CONST_INT_P (trueop1)
3349 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3350 return simplify_gen_binary (LSHIFTRT, mode, op0,
3351 gen_int_shift_amount (mode, val));
3352 break;
3353
3354 case DIV:
3355 /* Handle floating point and integers separately. */
3356 if (SCALAR_FLOAT_MODE_P (mode))
3357 {
3358 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3359 safe for modes with NaNs, since 0.0 / 0.0 will then be
3360 NaN rather than 0.0. Nor is it safe for modes with signed
3361 zeros, since dividing 0 by a negative number gives -0.0 */
3362 if (trueop0 == CONST0_RTX (mode)
3363 && !HONOR_NANS (mode)
3364 && !HONOR_SIGNED_ZEROS (mode)
3365 && ! side_effects_p (op1))
3366 return op0;
3367 /* x/1.0 is x. */
3368 if (trueop1 == CONST1_RTX (mode)
3369 && !HONOR_SNANS (mode))
3370 return op0;
3371
3372 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3373 && trueop1 != CONST0_RTX (mode))
3374 {
3375 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3376
3377 /* x/-1.0 is -x. */
3378 if (real_equal (d1, &dconstm1)
3379 && !HONOR_SNANS (mode))
3380 return simplify_gen_unary (NEG, mode, op0, mode);
3381
3382 /* Change FP division by a constant into multiplication.
3383 Only do this with -freciprocal-math. */
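/* E.g. (div:DF X 4.0) should become (mult:DF X 0.25) here; 0.25 is
   exact, but -freciprocal-math also allows the replacement when
   the reciprocal rounds. */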
3384 if (flag_reciprocal_math
3385 && !real_equal (d1, &dconst0))
3386 {
3387 REAL_VALUE_TYPE d;
3388 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3389 tem = const_double_from_real_value (d, mode);
3390 return simplify_gen_binary (MULT, mode, op0, tem);
3391 }
3392 }
3393 }
3394 else if (SCALAR_INT_MODE_P (mode))
3395 {
3396 /* 0/x is 0 (or x&0 if x has side-effects). */
3397 if (trueop0 == CONST0_RTX (mode)
3398 && !cfun->can_throw_non_call_exceptions)
3399 {
3400 if (side_effects_p (op1))
3401 return simplify_gen_binary (AND, mode, op1, trueop0);
3402 return trueop0;
3403 }
3404 /* x/1 is x. */
3405 if (trueop1 == CONST1_RTX (mode))
3406 {
3407 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3408 if (tem)
3409 return tem;
3410 }
3411 /* x/-1 is -x. */
3412 if (trueop1 == constm1_rtx)
3413 {
3414 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3415 if (x)
3416 return simplify_gen_unary (NEG, mode, x, mode);
3417 }
3418 }
3419 break;
3420
3421 case UMOD:
3422 /* 0%x is 0 (or x&0 if x has side-effects). */
3423 if (trueop0 == CONST0_RTX (mode))
3424 {
3425 if (side_effects_p (op1))
3426 return simplify_gen_binary (AND, mode, op1, trueop0);
3427 return trueop0;
3428 }
3429 	/* x%1 is 0 (or x&0 if x has side-effects). */
3430 if (trueop1 == CONST1_RTX (mode))
3431 {
3432 if (side_effects_p (op0))
3433 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3434 return CONST0_RTX (mode);
3435 }
3436 /* Implement modulus by power of two as AND. */
3437 if (CONST_INT_P (trueop1)
3438 && exact_log2 (UINTVAL (trueop1)) > 0)
3439 return simplify_gen_binary (AND, mode, op0,
3440 gen_int_mode (UINTVAL (trueop1) - 1,
3441 mode));
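 	/* E.g. (umod:SI (reg:SI x) (const_int 16)) becomes
 	   (and:SI (reg:SI x) (const_int 15)): the remainder of an
 	   unsigned division by 2**4 is just the low four bits.  */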
3442 break;
3443
3444 case MOD:
3445 /* 0%x is 0 (or x&0 if x has side-effects). */
3446 if (trueop0 == CONST0_RTX (mode))
3447 {
3448 if (side_effects_p (op1))
3449 return simplify_gen_binary (AND, mode, op1, trueop0);
3450 return trueop0;
3451 }
3452 	/* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3453 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3454 {
3455 if (side_effects_p (op0))
3456 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3457 return CONST0_RTX (mode);
3458 }
3459 break;
3460
3461 case ROTATERT:
3462 case ROTATE:
3463 	/* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3464 	   prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3465 	   bitsize - 1, use the other direction of rotate, with an amount of
3466 	   1 .. bitsize / 2 - 1, instead.  */
3467 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3468 if (CONST_INT_P (trueop1)
3469 && IN_RANGE (INTVAL (trueop1),
3470 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3471 GET_MODE_UNIT_PRECISION (mode) - 1))
3472 {
3473 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3474 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3475 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3476 mode, op0, new_amount_rtx);
3477 }
3478 #endif
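 	/* E.g. with a 32-bit unit, (rotate:SI x (const_int 24)) is
 	   canonicalized to (rotatert:SI x (const_int 8)), so constant
 	   rotate amounts stay in the range 1 .. bitsize / 2.  */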
3479 /* FALLTHRU */
3480 case ASHIFTRT:
3481 if (trueop1 == CONST0_RTX (mode))
3482 return op0;
3483 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3484 return op0;
3485 	/* Rotating ~0 always results in ~0; likewise for an arithmetic right shift of ~0. */
3486 if (CONST_INT_P (trueop0)
3487 && HWI_COMPUTABLE_MODE_P (mode)
3488 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3489 && ! side_effects_p (op1))
3490 return op0;
3491
3492 canonicalize_shift:
3493 /* Given:
3494 scalar modes M1, M2
3495 scalar constants c1, c2
3496 size (M2) > size (M1)
3497 c1 == size (M2) - size (M1)
3498 optimize:
3499 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3500 <low_part>)
3501 (const_int <c2>))
3502 to:
3503 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3504 <low_part>). */
3505 if ((code == ASHIFTRT || code == LSHIFTRT)
3506 && is_a <scalar_int_mode> (mode, &int_mode)
3507 && SUBREG_P (op0)
3508 && CONST_INT_P (op1)
3509 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3510 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3511 &inner_mode)
3512 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3513 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3514 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3515 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3516 && subreg_lowpart_p (op0))
3517 {
3518 rtx tmp = gen_int_shift_amount
3519 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3520 tmp = simplify_gen_binary (code, inner_mode,
3521 XEXP (SUBREG_REG (op0), 0),
3522 tmp);
3523 return lowpart_subreg (int_mode, tmp, inner_mode);
3524 }
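 	/* Concretely, with M2 == SImode, M1 == HImode and c1 == 16:
 	   (lshiftrt:HI (subreg:HI (lshiftrt:SI (reg:SI x) (const_int 16)) 0)
 			(const_int 2))
 	   becomes
 	   (subreg:HI (lshiftrt:SI (reg:SI x) (const_int 18)) 0).  */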
3525
3526 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3527 {
3528 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3529 if (val != INTVAL (op1))
3530 return simplify_gen_binary (code, mode, op0,
3531 gen_int_shift_amount (mode, val));
3532 }
3533 break;
3534
3535 case ASHIFT:
3536 case SS_ASHIFT:
3537 case US_ASHIFT:
3538 if (trueop1 == CONST0_RTX (mode))
3539 return op0;
3540 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3541 return op0;
3542 goto canonicalize_shift;
3543
3544 case LSHIFTRT:
3545 if (trueop1 == CONST0_RTX (mode))
3546 return op0;
3547 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3548 return op0;
3549 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3550 if (GET_CODE (op0) == CLZ
3551 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3552 && CONST_INT_P (trueop1)
3553 && STORE_FLAG_VALUE == 1
3554 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3555 {
3556 unsigned HOST_WIDE_INT zero_val = 0;
3557
3558 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3559 && zero_val == GET_MODE_PRECISION (inner_mode)
3560 && INTVAL (trueop1) == exact_log2 (zero_val))
3561 return simplify_gen_relational (EQ, mode, inner_mode,
3562 XEXP (op0, 0), const0_rtx);
3563 }
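 	/* E.g. for SImode X, assuming CLZ_DEFINED_VALUE_AT_ZERO sets the
 	   value at zero to 32, (lshiftrt:SI (clz:SI x) (const_int 5)) is
 	   nonzero only when the clz result is 32, i.e. only when x == 0,
 	   so it becomes (eq:SI x (const_int 0)).  */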
3564 goto canonicalize_shift;
3565
3566 case SMIN:
3567 if (HWI_COMPUTABLE_MODE_P (mode)
3568 && mode_signbit_p (mode, trueop1)
3569 && ! side_effects_p (op0))
3570 return op1;
3571 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3572 return op0;
3573 tem = simplify_associative_operation (code, mode, op0, op1);
3574 if (tem)
3575 return tem;
3576 break;
3577
3578 case SMAX:
3579 if (HWI_COMPUTABLE_MODE_P (mode)
3580 && CONST_INT_P (trueop1)
3581 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3582 && ! side_effects_p (op0))
3583 return op1;
3584 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3585 return op0;
3586 tem = simplify_associative_operation (code, mode, op0, op1);
3587 if (tem)
3588 return tem;
3589 break;
3590
3591 case UMIN:
3592 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3593 return op1;
3594 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3595 return op0;
3596 tem = simplify_associative_operation (code, mode, op0, op1);
3597 if (tem)
3598 return tem;
3599 break;
3600
3601 case UMAX:
3602 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3603 return op1;
3604 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3605 return op0;
3606 tem = simplify_associative_operation (code, mode, op0, op1);
3607 if (tem)
3608 return tem;
3609 break;
3610
3611 case SS_PLUS:
3612 case US_PLUS:
3613 case SS_MINUS:
3614 case US_MINUS:
3615 case SS_MULT:
3616 case US_MULT:
3617 case SS_DIV:
3618 case US_DIV:
3619 /* ??? There are simplifications that can be done. */
3620 return 0;
3621
3622 case VEC_SERIES:
3623 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3624 return gen_vec_duplicate (mode, op0);
3625 if (valid_for_const_vector_p (mode, op0)
3626 && valid_for_const_vector_p (mode, op1))
3627 return gen_const_vec_series (mode, op0, op1);
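       /* E.g. (vec_series:V4SI (const_int 1) (const_int 2)) folds to the
 	 constant vector {1, 3, 5, 7}, since element i is op0 + i * op1.  */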
3628 return 0;
3629
3630 case VEC_SELECT:
3631 if (!VECTOR_MODE_P (mode))
3632 {
3633 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3634 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3635 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3636 gcc_assert (XVECLEN (trueop1, 0) == 1);
3637
3638 /* We can't reason about selections made at runtime. */
3639 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3640 return 0;
3641
3642 if (vec_duplicate_p (trueop0, &elt0))
3643 return elt0;
3644
3645 if (GET_CODE (trueop0) == CONST_VECTOR)
3646 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3647 (trueop1, 0, 0)));
3648
3649 /* Extract a scalar element from a nested VEC_SELECT expression
3650 (with optional nested VEC_CONCAT expression). Some targets
3651 (i386) extract scalar element from a vector using chain of
3652 nested VEC_SELECT expressions. When input operand is a memory
3653 operand, this operation can be simplified to a simple scalar
3654 	     load from a suitably offset memory address.  */
3655 int n_elts;
3656 if (GET_CODE (trueop0) == VEC_SELECT
3657 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3658 .is_constant (&n_elts)))
3659 {
3660 rtx op0 = XEXP (trueop0, 0);
3661 rtx op1 = XEXP (trueop0, 1);
3662
3663 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3664 int elem;
3665
3666 rtvec vec;
3667 rtx tmp_op, tmp;
3668
3669 gcc_assert (GET_CODE (op1) == PARALLEL);
3670 gcc_assert (i < n_elts);
3671
3672 	      /* Select the element pointed to by the nested selector.  */
3673 elem = INTVAL (XVECEXP (op1, 0, i));
3674
3675 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3676 if (GET_CODE (op0) == VEC_CONCAT)
3677 {
3678 rtx op00 = XEXP (op0, 0);
3679 rtx op01 = XEXP (op0, 1);
3680
3681 machine_mode mode00, mode01;
3682 int n_elts00, n_elts01;
3683
3684 mode00 = GET_MODE (op00);
3685 mode01 = GET_MODE (op01);
3686
3687 /* Find out the number of elements of each operand.
3688 Since the concatenated result has a constant number
3689 of elements, the operands must too. */
3690 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3691 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3692
3693 gcc_assert (n_elts == n_elts00 + n_elts01);
3694
3695 	      /* Select the correct operand of the VEC_CONCAT
3696 		 and adjust the selector.  */
3697 	      if (elem < n_elts00)
3698 tmp_op = op00;
3699 else
3700 {
3701 tmp_op = op01;
3702 elem -= n_elts00;
3703 }
3704 }
3705 else
3706 tmp_op = op0;
3707
3708 vec = rtvec_alloc (1);
3709 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3710
3711 tmp = gen_rtx_fmt_ee (code, mode,
3712 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3713 return tmp;
3714 }
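 	  /* For example, the nested selection
 	     (vec_select:SF
 	       (vec_select:V2SF (reg:V4SF x) (parallel [(const_int 2)
 							(const_int 3)]))
 	       (parallel [(const_int 0)]))
 	     collapses to
 	     (vec_select:SF (reg:V4SF x) (parallel [(const_int 2)])),
 	     which for a memory operand x is a single scalar load.  */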
3715 }
3716 else
3717 {
3718 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3719 gcc_assert (GET_MODE_INNER (mode)
3720 == GET_MODE_INNER (GET_MODE (trueop0)));
3721 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3722
3723 if (vec_duplicate_p (trueop0, &elt0))
3724 /* It doesn't matter which elements are selected by trueop1,
3725 because they are all the same. */
3726 return gen_vec_duplicate (mode, elt0);
3727
3728 if (GET_CODE (trueop0) == CONST_VECTOR)
3729 {
3730 unsigned n_elts = XVECLEN (trueop1, 0);
3731 rtvec v = rtvec_alloc (n_elts);
3732 unsigned int i;
3733
3734 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3735 for (i = 0; i < n_elts; i++)
3736 {
3737 rtx x = XVECEXP (trueop1, 0, i);
3738
3739 if (!CONST_INT_P (x))
3740 return 0;
3741
3742 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3743 INTVAL (x));
3744 }
3745
3746 return gen_rtx_CONST_VECTOR (mode, v);
3747 }
3748
3749 /* Recognize the identity. */
3750 if (GET_MODE (trueop0) == mode)
3751 {
3752 bool maybe_ident = true;
3753 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3754 {
3755 rtx j = XVECEXP (trueop1, 0, i);
3756 if (!CONST_INT_P (j) || INTVAL (j) != i)
3757 {
3758 maybe_ident = false;
3759 break;
3760 }
3761 }
3762 if (maybe_ident)
3763 return trueop0;
3764 }
3765
3766 /* If we build {a,b} then permute it, build the result directly. */
3767 if (XVECLEN (trueop1, 0) == 2
3768 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3769 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3770 && GET_CODE (trueop0) == VEC_CONCAT
3771 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3772 && GET_MODE (XEXP (trueop0, 0)) == mode
3773 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3774 && GET_MODE (XEXP (trueop0, 1)) == mode)
3775 {
3776 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3777 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3778 rtx subop0, subop1;
3779
3780 gcc_assert (i0 < 4 && i1 < 4);
3781 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3782 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3783
3784 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3785 }
3786
3787 if (XVECLEN (trueop1, 0) == 2
3788 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3789 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3790 && GET_CODE (trueop0) == VEC_CONCAT
3791 && GET_MODE (trueop0) == mode)
3792 {
3793 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3794 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3795 rtx subop0, subop1;
3796
3797 gcc_assert (i0 < 2 && i1 < 2);
3798 subop0 = XEXP (trueop0, i0);
3799 subop1 = XEXP (trueop0, i1);
3800
3801 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3802 }
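 	  /* For instance,
 	     (vec_select:V2DF (vec_concat:V2DF (reg:DF a) (reg:DF b))
 			      (parallel [(const_int 1) (const_int 0)]))
 	     is rebuilt directly as
 	     (vec_concat:V2DF (reg:DF b) (reg:DF a)).  */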
3803
3804 /* If we select one half of a vec_concat, return that. */
3805 int l0, l1;
3806 if (GET_CODE (trueop0) == VEC_CONCAT
3807 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3808 .is_constant (&l0))
3809 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3810 .is_constant (&l1))
3811 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3812 {
3813 rtx subop0 = XEXP (trueop0, 0);
3814 rtx subop1 = XEXP (trueop0, 1);
3815 machine_mode mode0 = GET_MODE (subop0);
3816 machine_mode mode1 = GET_MODE (subop1);
3817 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3818 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3819 {
3820 bool success = true;
3821 for (int i = 1; i < l0; ++i)
3822 {
3823 rtx j = XVECEXP (trueop1, 0, i);
3824 if (!CONST_INT_P (j) || INTVAL (j) != i)
3825 {
3826 success = false;
3827 break;
3828 }
3829 }
3830 if (success)
3831 return subop0;
3832 }
3833 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3834 {
3835 bool success = true;
3836 for (int i = 1; i < l1; ++i)
3837 {
3838 rtx j = XVECEXP (trueop1, 0, i);
3839 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3840 {
3841 success = false;
3842 break;
3843 }
3844 }
3845 if (success)
3846 return subop1;
3847 }
3848 }
3849 }
3850
3851 if (XVECLEN (trueop1, 0) == 1
3852 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3853 && GET_CODE (trueop0) == VEC_CONCAT)
3854 {
3855 rtx vec = trueop0;
3856 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3857
3858 /* Try to find the element in the VEC_CONCAT. */
3859 while (GET_MODE (vec) != mode
3860 && GET_CODE (vec) == VEC_CONCAT)
3861 {
3862 poly_int64 vec_size;
3863
3864 if (CONST_INT_P (XEXP (vec, 0)))
3865 {
3866 /* vec_concat of two const_ints doesn't make sense with
3867 respect to modes. */
3868 if (CONST_INT_P (XEXP (vec, 1)))
3869 return 0;
3870
3871 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3872 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3873 }
3874 else
3875 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3876
3877 if (known_lt (offset, vec_size))
3878 vec = XEXP (vec, 0);
3879 else if (known_ge (offset, vec_size))
3880 {
3881 offset -= vec_size;
3882 vec = XEXP (vec, 1);
3883 }
3884 else
3885 break;
3886 vec = avoid_constant_pool_reference (vec);
3887 }
3888
3889 if (GET_MODE (vec) == mode)
3890 return vec;
3891 }
3892
3893 /* If we select elements in a vec_merge that all come from the same
3894 operand, select from that operand directly. */
3895 if (GET_CODE (op0) == VEC_MERGE)
3896 {
3897 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3898 if (CONST_INT_P (trueop02))
3899 {
3900 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3901 bool all_operand0 = true;
3902 bool all_operand1 = true;
3903 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3904 {
3905 rtx j = XVECEXP (trueop1, 0, i);
3906 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3907 all_operand1 = false;
3908 else
3909 all_operand0 = false;
3910 }
3911 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3912 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3913 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3914 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3915 }
3916 }
3917
3918 /* If we have two nested selects that are inverses of each
3919 other, replace them with the source operand. */
3920 if (GET_CODE (trueop0) == VEC_SELECT
3921 && GET_MODE (XEXP (trueop0, 0)) == mode)
3922 {
3923 rtx op0_subop1 = XEXP (trueop0, 1);
3924 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3925 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3926
3927 /* Apply the outer ordering vector to the inner one. (The inner
3928 ordering vector is expressly permitted to be of a different
3929 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3930 then the two VEC_SELECTs cancel. */
3931 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3932 {
3933 rtx x = XVECEXP (trueop1, 0, i);
3934 if (!CONST_INT_P (x))
3935 return 0;
3936 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3937 if (!CONST_INT_P (y) || i != INTVAL (y))
3938 return 0;
3939 }
3940 return XEXP (trueop0, 0);
3941 }
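       /* E.g. the pair-swapping permutation is its own inverse, so
 	 (vec_select:V4SI
 	   (vec_select:V4SI (reg:V4SI x)
 			    (parallel [(const_int 1) (const_int 0)
 				       (const_int 3) (const_int 2)]))
 	   (parallel [(const_int 1) (const_int 0)
 		      (const_int 3) (const_int 2)]))
 	 simplifies to (reg:V4SI x).  */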
3942
3943 return 0;
3944 case VEC_CONCAT:
3945 {
3946 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3947 ? GET_MODE (trueop0)
3948 : GET_MODE_INNER (mode));
3949 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3950 ? GET_MODE (trueop1)
3951 : GET_MODE_INNER (mode));
3952
3953 gcc_assert (VECTOR_MODE_P (mode));
3954 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3955 + GET_MODE_SIZE (op1_mode),
3956 GET_MODE_SIZE (mode)));
3957
3958 if (VECTOR_MODE_P (op0_mode))
3959 gcc_assert (GET_MODE_INNER (mode)
3960 == GET_MODE_INNER (op0_mode));
3961 else
3962 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3963
3964 if (VECTOR_MODE_P (op1_mode))
3965 gcc_assert (GET_MODE_INNER (mode)
3966 == GET_MODE_INNER (op1_mode));
3967 else
3968 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3969
3970 unsigned int n_elts, in_n_elts;
3971 if ((GET_CODE (trueop0) == CONST_VECTOR
3972 || CONST_SCALAR_INT_P (trueop0)
3973 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3974 && (GET_CODE (trueop1) == CONST_VECTOR
3975 || CONST_SCALAR_INT_P (trueop1)
3976 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3977 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3978 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3979 {
3980 rtvec v = rtvec_alloc (n_elts);
3981 unsigned int i;
3982 for (i = 0; i < n_elts; i++)
3983 {
3984 if (i < in_n_elts)
3985 {
3986 if (!VECTOR_MODE_P (op0_mode))
3987 RTVEC_ELT (v, i) = trueop0;
3988 else
3989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3990 }
3991 else
3992 {
3993 if (!VECTOR_MODE_P (op1_mode))
3994 RTVEC_ELT (v, i) = trueop1;
3995 else
3996 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3997 i - in_n_elts);
3998 }
3999 }
4000
4001 return gen_rtx_CONST_VECTOR (mode, v);
4002 }
4003
4004 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4005 Restrict the transformation to avoid generating a VEC_SELECT with a
4006 mode unrelated to its operand. */
4007 if (GET_CODE (trueop0) == VEC_SELECT
4008 && GET_CODE (trueop1) == VEC_SELECT
4009 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4010 && GET_MODE (XEXP (trueop0, 0)) == mode)
4011 {
4012 rtx par0 = XEXP (trueop0, 1);
4013 rtx par1 = XEXP (trueop1, 1);
4014 int len0 = XVECLEN (par0, 0);
4015 int len1 = XVECLEN (par1, 0);
4016 rtvec vec = rtvec_alloc (len0 + len1);
4017 for (int i = 0; i < len0; i++)
4018 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4019 for (int i = 0; i < len1; i++)
4020 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4021 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4022 gen_rtx_PARALLEL (VOIDmode, vec));
4023 }
4024 }
4025 return 0;
4026
4027 default:
4028 gcc_unreachable ();
4029 }
4030
4031 if (mode == GET_MODE (op0)
4032 && mode == GET_MODE (op1)
4033 && vec_duplicate_p (op0, &elt0)
4034 && vec_duplicate_p (op1, &elt1))
4035 {
4036 /* Try applying the operator to ELT and see if that simplifies.
4037 We can duplicate the result if so.
4038
4039 The reason we don't use simplify_gen_binary is that it isn't
4040 necessarily a win to convert things like:
4041
4042 (plus:V (vec_duplicate:V (reg:S R1))
4043 (vec_duplicate:V (reg:S R2)))
4044
4045 to:
4046
4047 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4048
4049 The first might be done entirely in vector registers while the
4050 second might need a move between register files. */
4051 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4052 elt0, elt1);
4053 if (tem)
4054 return gen_vec_duplicate (mode, tem);
4055 }
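   /* For an integer inner mode S this folds, for example,
      (minus:V (vec_duplicate:V (reg:S r)) (vec_duplicate:V (reg:S r)))
      to CONST0_RTX (V), because the inner (minus:S r r) simplifies to
      zero and a duplicated constant is a constant vector.  */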
4056
4057 return 0;
4058 }
4059
4060 /* Return true if binary operation OP distributes over addition in operand
4061 OPNO, with the other operand being held constant. OPNO counts from 1. */
4062
4063 static bool
4064 distributes_over_addition_p (rtx_code op, int opno)
4065 {
4066 switch (op)
4067 {
4068 case PLUS:
4069 case MINUS:
4070 case MULT:
4071 return true;
4072
4073 case ASHIFT:
4074 return opno == 1;
4075
4076 default:
4077 return false;
4078 }
4079 }
4080
4081 rtx
4082 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4083 rtx op0, rtx op1)
4084 {
4085 if (VECTOR_MODE_P (mode)
4086 && code != VEC_CONCAT
4087 && GET_CODE (op0) == CONST_VECTOR
4088 && GET_CODE (op1) == CONST_VECTOR)
4089 {
4090 bool step_ok_p;
4091 if (CONST_VECTOR_STEPPED_P (op0)
4092 && CONST_VECTOR_STEPPED_P (op1))
4093 /* We can operate directly on the encoding if:
4094
4095 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4096 implies
4097 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4098
4099 Addition and subtraction are the supported operators
4100 for which this is true. */
4101 step_ok_p = (code == PLUS || code == MINUS);
4102 else if (CONST_VECTOR_STEPPED_P (op0))
4103 /* We can operate directly on stepped encodings if:
4104
4105 a3 - a2 == a2 - a1
4106 implies:
4107 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4108
4109 which is true if (x -> x op c) distributes over addition. */
4110 step_ok_p = distributes_over_addition_p (code, 1);
4111 else
4112 /* Similarly in reverse. */
4113 step_ok_p = distributes_over_addition_p (code, 2);
4114 rtx_vector_builder builder;
4115 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4116 return 0;
4117
4118 unsigned int count = builder.encoded_nelts ();
4119 for (unsigned int i = 0; i < count; i++)
4120 {
4121 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4122 CONST_VECTOR_ELT (op0, i),
4123 CONST_VECTOR_ELT (op1, i));
4124 if (!x || !valid_for_const_vector_p (mode, x))
4125 return 0;
4126 builder.quick_push (x);
4127 }
4128 return builder.build ();
4129 }
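   /* As an example of the stepped case: ASHIFT distributes over addition
      in operand 1, so a stepped constant vector such as { 1, 2, 3, ... }
      shifted left element-wise by a constant 2 can be computed on the
      encoded elements alone, yielding the stepped vector { 4, 8, 12, ... }.  */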
4130
4131 if (VECTOR_MODE_P (mode)
4132 && code == VEC_CONCAT
4133 && (CONST_SCALAR_INT_P (op0)
4134 || CONST_FIXED_P (op0)
4135 || CONST_DOUBLE_AS_FLOAT_P (op0))
4136 && (CONST_SCALAR_INT_P (op1)
4137 || CONST_DOUBLE_AS_FLOAT_P (op1)
4138 || CONST_FIXED_P (op1)))
4139 {
4140 /* Both inputs have a constant number of elements, so the result
4141 must too. */
4142 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4143 rtvec v = rtvec_alloc (n_elts);
4144
4145 gcc_assert (n_elts >= 2);
4146 if (n_elts == 2)
4147 {
4148 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4149 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4150
4151 RTVEC_ELT (v, 0) = op0;
4152 RTVEC_ELT (v, 1) = op1;
4153 }
4154 else
4155 {
4156 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4157 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4158 unsigned i;
4159
4160 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4161 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4162 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4163
4164 for (i = 0; i < op0_n_elts; ++i)
4165 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4166 for (i = 0; i < op1_n_elts; ++i)
4167 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4168 }
4169
4170 return gen_rtx_CONST_VECTOR (mode, v);
4171 }
4172
4173 if (SCALAR_FLOAT_MODE_P (mode)
4174 && CONST_DOUBLE_AS_FLOAT_P (op0)
4175 && CONST_DOUBLE_AS_FLOAT_P (op1)
4176 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4177 {
4178 if (code == AND
4179 || code == IOR
4180 || code == XOR)
4181 {
4182 long tmp0[4];
4183 long tmp1[4];
4184 REAL_VALUE_TYPE r;
4185 int i;
4186
4187 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4188 GET_MODE (op0));
4189 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4190 GET_MODE (op1));
4191 for (i = 0; i < 4; i++)
4192 {
4193 switch (code)
4194 {
4195 case AND:
4196 tmp0[i] &= tmp1[i];
4197 break;
4198 case IOR:
4199 tmp0[i] |= tmp1[i];
4200 break;
4201 case XOR:
4202 tmp0[i] ^= tmp1[i];
4203 break;
4204 default:
4205 gcc_unreachable ();
4206 }
4207 }
4208 real_from_target (&r, tmp0, mode);
4209 return const_double_from_real_value (r, mode);
4210 }
4211 else
4212 {
4213 REAL_VALUE_TYPE f0, f1, value, result;
4214 const REAL_VALUE_TYPE *opr0, *opr1;
4215 bool inexact;
4216
4217 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4218 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4219
4220 if (HONOR_SNANS (mode)
4221 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4222 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4223 return 0;
4224
4225 real_convert (&f0, mode, opr0);
4226 real_convert (&f1, mode, opr1);
4227
4228 if (code == DIV
4229 && real_equal (&f1, &dconst0)
4230 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4231 return 0;
4232
4233 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4234 && flag_trapping_math
4235 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4236 {
4237 int s0 = REAL_VALUE_NEGATIVE (f0);
4238 int s1 = REAL_VALUE_NEGATIVE (f1);
4239
4240 switch (code)
4241 {
4242 case PLUS:
4243 /* Inf + -Inf = NaN plus exception. */
4244 if (s0 != s1)
4245 return 0;
4246 break;
4247 case MINUS:
4248 /* Inf - Inf = NaN plus exception. */
4249 if (s0 == s1)
4250 return 0;
4251 break;
4252 case DIV:
4253 /* Inf / Inf = NaN plus exception. */
4254 return 0;
4255 default:
4256 break;
4257 }
4258 }
4259
4260 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4261 && flag_trapping_math
4262 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4263 || (REAL_VALUE_ISINF (f1)
4264 && real_equal (&f0, &dconst0))))
4265 /* Inf * 0 = NaN plus exception. */
4266 return 0;
4267
4268 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4269 &f0, &f1);
4270 real_convert (&result, mode, &value);
4271
4272 /* Don't constant fold this floating point operation if
4273 	     the result has overflowed and flag_trapping_math is set.  */
4274
4275 if (flag_trapping_math
4276 && MODE_HAS_INFINITIES (mode)
4277 && REAL_VALUE_ISINF (result)
4278 && !REAL_VALUE_ISINF (f0)
4279 && !REAL_VALUE_ISINF (f1))
4280 /* Overflow plus exception. */
4281 return 0;
4282
4283 /* Don't constant fold this floating point operation if the
4284 	     result may depend upon the run-time rounding mode and
4285 flag_rounding_math is set, or if GCC's software emulation
4286 is unable to accurately represent the result. */
4287
4288 if ((flag_rounding_math
4289 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4290 && (inexact || !real_identical (&result, &value)))
4291 return NULL_RTX;
4292
4293 return const_double_from_real_value (result, mode);
4294 }
4295 }
4296
4297 /* We can fold some multi-word operations. */
4298 scalar_int_mode int_mode;
4299 if (is_a <scalar_int_mode> (mode, &int_mode)
4300 && CONST_SCALAR_INT_P (op0)
4301 && CONST_SCALAR_INT_P (op1))
4302 {
4303 wide_int result;
4304 wi::overflow_type overflow;
4305 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4306 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4307
4308 #if TARGET_SUPPORTS_WIDE_INT == 0
4309 	 /* This assert keeps the simplification from producing a result
4310 	    that cannot be represented in a CONST_DOUBLE, but a lot of
4311 	    upstream callers expect that this function never fails to
4312 	    simplify something, so if this check were added to the test
4313 	    above, the code would just die later anyway.  If this assert
4314 	    happens, you just need to make the port support wide int.  */
4315 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4316 #endif
4317 switch (code)
4318 {
4319 case MINUS:
4320 result = wi::sub (pop0, pop1);
4321 break;
4322
4323 case PLUS:
4324 result = wi::add (pop0, pop1);
4325 break;
4326
4327 case MULT:
4328 result = wi::mul (pop0, pop1);
4329 break;
4330
4331 case DIV:
4332 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4333 if (overflow)
4334 return NULL_RTX;
4335 break;
4336
4337 case MOD:
4338 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4339 if (overflow)
4340 return NULL_RTX;
4341 break;
4342
4343 case UDIV:
4344 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4345 if (overflow)
4346 return NULL_RTX;
4347 break;
4348
4349 case UMOD:
4350 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4351 if (overflow)
4352 return NULL_RTX;
4353 break;
4354
4355 case AND:
4356 result = wi::bit_and (pop0, pop1);
4357 break;
4358
4359 case IOR:
4360 result = wi::bit_or (pop0, pop1);
4361 break;
4362
4363 case XOR:
4364 result = wi::bit_xor (pop0, pop1);
4365 break;
4366
4367 case SMIN:
4368 result = wi::smin (pop0, pop1);
4369 break;
4370
4371 case SMAX:
4372 result = wi::smax (pop0, pop1);
4373 break;
4374
4375 case UMIN:
4376 result = wi::umin (pop0, pop1);
4377 break;
4378
4379 case UMAX:
4380 result = wi::umax (pop0, pop1);
4381 break;
4382
4383 case LSHIFTRT:
4384 case ASHIFTRT:
4385 case ASHIFT:
4386 {
4387 wide_int wop1 = pop1;
4388 if (SHIFT_COUNT_TRUNCATED)
4389 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4390 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4391 return NULL_RTX;
4392
4393 switch (code)
4394 {
4395 case LSHIFTRT:
4396 result = wi::lrshift (pop0, wop1);
4397 break;
4398
4399 case ASHIFTRT:
4400 result = wi::arshift (pop0, wop1);
4401 break;
4402
4403 case ASHIFT:
4404 result = wi::lshift (pop0, wop1);
4405 break;
4406
4407 default:
4408 gcc_unreachable ();
4409 }
4410 break;
4411 }
4412 case ROTATE:
4413 case ROTATERT:
4414 {
4415 if (wi::neg_p (pop1))
4416 return NULL_RTX;
4417
4418 switch (code)
4419 {
4420 case ROTATE:
4421 result = wi::lrotate (pop0, pop1);
4422 break;
4423
4424 case ROTATERT:
4425 result = wi::rrotate (pop0, pop1);
4426 break;
4427
4428 default:
4429 gcc_unreachable ();
4430 }
4431 break;
4432 }
4433 default:
4434 return NULL_RTX;
4435 }
4436 return immed_wide_int_const (result, int_mode);
4437 }
4438
4439 /* Handle polynomial integers. */
4440 if (NUM_POLY_INT_COEFFS > 1
4441 && is_a <scalar_int_mode> (mode, &int_mode)
4442 && poly_int_rtx_p (op0)
4443 && poly_int_rtx_p (op1))
4444 {
4445 poly_wide_int result;
4446 switch (code)
4447 {
4448 case PLUS:
4449 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4450 break;
4451
4452 case MINUS:
4453 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4454 break;
4455
4456 case MULT:
4457 if (CONST_SCALAR_INT_P (op1))
4458 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4459 else
4460 return NULL_RTX;
4461 break;
4462
4463 case ASHIFT:
4464 if (CONST_SCALAR_INT_P (op1))
4465 {
4466 wide_int shift = rtx_mode_t (op1, mode);
4467 if (SHIFT_COUNT_TRUNCATED)
4468 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4469 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4470 return NULL_RTX;
4471 result = wi::to_poly_wide (op0, mode) << shift;
4472 }
4473 else
4474 return NULL_RTX;
4475 break;
4476
4477 case IOR:
4478 if (!CONST_SCALAR_INT_P (op1)
4479 || !can_ior_p (wi::to_poly_wide (op0, mode),
4480 rtx_mode_t (op1, mode), &result))
4481 return NULL_RTX;
4482 break;
4483
4484 default:
4485 return NULL_RTX;
4486 }
4487 return immed_wide_int_const (result, int_mode);
4488 }
4489
4490 return NULL_RTX;
4491 }
4492
4493
4494 \f
4495 /* Return a positive integer if X should sort after Y. The value
4496 returned is 1 if and only if X and Y are both regs. */
4497
4498 static int
4499 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4500 {
4501 int result;
4502
4503 result = (commutative_operand_precedence (y)
4504 - commutative_operand_precedence (x));
4505 if (result)
4506 return result + result;
4507
4508 /* Group together equal REGs to do more simplification. */
4509 if (REG_P (x) && REG_P (y))
4510 return REGNO (x) > REGNO (y);
4511
4512 return 0;
4513 }
4514
4515 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4516 operands may be another PLUS or MINUS.
4517
4518 	 Rather than test for specific cases, we do this by a brute-force method
4519 and do all possible simplifications until no more changes occur. Then
4520 we rebuild the operation.
4521
4522 May return NULL_RTX when no changes were made. */
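 /* For example, simplifying (plus (minus (reg a) (reg b)) (reg b))
    expands the operands into { +a, -b, +b }; the -b/+b pair folds to
    zero, which is then absorbed by the remaining term, leaving (reg a).  */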
4523
4524 static rtx
4525 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4526 rtx op1)
4527 {
4528 struct simplify_plus_minus_op_data
4529 {
4530 rtx op;
4531 short neg;
4532 } ops[16];
4533 rtx result, tem;
4534 int n_ops = 2;
4535 int changed, n_constants, canonicalized = 0;
4536 int i, j;
4537
4538 memset (ops, 0, sizeof ops);
4539
4540 /* Set up the two operands and then expand them until nothing has been
4541 changed. If we run out of room in our array, give up; this should
4542 almost never happen. */
4543
4544 ops[0].op = op0;
4545 ops[0].neg = 0;
4546 ops[1].op = op1;
4547 ops[1].neg = (code == MINUS);
4548
4549 do
4550 {
4551 changed = 0;
4552 n_constants = 0;
4553
4554 for (i = 0; i < n_ops; i++)
4555 {
4556 rtx this_op = ops[i].op;
4557 int this_neg = ops[i].neg;
4558 enum rtx_code this_code = GET_CODE (this_op);
4559
4560 switch (this_code)
4561 {
4562 case PLUS:
4563 case MINUS:
4564 if (n_ops == ARRAY_SIZE (ops))
4565 return NULL_RTX;
4566
4567 ops[n_ops].op = XEXP (this_op, 1);
4568 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4569 n_ops++;
4570
4571 ops[i].op = XEXP (this_op, 0);
4572 changed = 1;
4573 /* If this operand was negated then we will potentially
4574 canonicalize the expression. Similarly if we don't
4575 place the operands adjacent we're re-ordering the
4576 expression and thus might be performing a
4577 canonicalization. Ignore register re-ordering.
4578 ??? It might be better to shuffle the ops array here,
4579 but then (plus (plus (A, B), plus (C, D))) wouldn't
4580 be seen as non-canonical. */
4581 if (this_neg
4582 || (i != n_ops - 2
4583 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4584 canonicalized = 1;
4585 break;
4586
4587 case NEG:
4588 ops[i].op = XEXP (this_op, 0);
4589 ops[i].neg = ! this_neg;
4590 changed = 1;
4591 canonicalized = 1;
4592 break;
4593
4594 case CONST:
4595 if (n_ops != ARRAY_SIZE (ops)
4596 && GET_CODE (XEXP (this_op, 0)) == PLUS
4597 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4598 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4599 {
4600 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4601 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4602 ops[n_ops].neg = this_neg;
4603 n_ops++;
4604 changed = 1;
4605 canonicalized = 1;
4606 }
4607 break;
4608
4609 case NOT:
4610 /* ~a -> (-a - 1) */
4611 if (n_ops != ARRAY_SIZE (ops))
4612 {
4613 ops[n_ops].op = CONSTM1_RTX (mode);
4614 ops[n_ops++].neg = this_neg;
4615 ops[i].op = XEXP (this_op, 0);
4616 ops[i].neg = !this_neg;
4617 changed = 1;
4618 canonicalized = 1;
4619 }
4620 break;
4621
4622 case CONST_INT:
4623 n_constants++;
4624 if (this_neg)
4625 {
4626 ops[i].op = neg_const_int (mode, this_op);
4627 ops[i].neg = 0;
4628 changed = 1;
4629 canonicalized = 1;
4630 }
4631 break;
4632
4633 default:
4634 break;
4635 }
4636 }
4637 }
4638 while (changed);
4639
4640 if (n_constants > 1)
4641 canonicalized = 1;
4642
4643 gcc_assert (n_ops >= 2);
4644
4645 /* If we only have two operands, we can avoid the loops. */
4646 if (n_ops == 2)
4647 {
4648 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4649 rtx lhs, rhs;
4650
4651 /* Get the two operands. Be careful with the order, especially for
4652 the cases where code == MINUS. */
4653 if (ops[0].neg && ops[1].neg)
4654 {
4655 lhs = gen_rtx_NEG (mode, ops[0].op);
4656 rhs = ops[1].op;
4657 }
4658 else if (ops[0].neg)
4659 {
4660 lhs = ops[1].op;
4661 rhs = ops[0].op;
4662 }
4663 else
4664 {
4665 lhs = ops[0].op;
4666 rhs = ops[1].op;
4667 }
4668
4669 return simplify_const_binary_operation (code, mode, lhs, rhs);
4670 }
4671
4672 /* Now simplify each pair of operands until nothing changes. */
4673 while (1)
4674 {
4675 /* Insertion sort is good enough for a small array. */
4676 for (i = 1; i < n_ops; i++)
4677 {
4678 struct simplify_plus_minus_op_data save;
4679 int cmp;
4680
4681 j = i - 1;
4682 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4683 if (cmp <= 0)
4684 continue;
4685 /* Just swapping registers doesn't count as canonicalization. */
4686 if (cmp != 1)
4687 canonicalized = 1;
4688
4689 save = ops[i];
4690 do
4691 ops[j + 1] = ops[j];
4692 while (j--
4693 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4694 ops[j + 1] = save;
4695 }
4696
4697 changed = 0;
4698 for (i = n_ops - 1; i > 0; i--)
4699 for (j = i - 1; j >= 0; j--)
4700 {
4701 rtx lhs = ops[j].op, rhs = ops[i].op;
4702 int lneg = ops[j].neg, rneg = ops[i].neg;
4703
4704 if (lhs != 0 && rhs != 0)
4705 {
4706 enum rtx_code ncode = PLUS;
4707
4708 if (lneg != rneg)
4709 {
4710 ncode = MINUS;
4711 if (lneg)
4712 std::swap (lhs, rhs);
4713 }
4714 else if (swap_commutative_operands_p (lhs, rhs))
4715 std::swap (lhs, rhs);
4716
4717 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4718 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4719 {
4720 rtx tem_lhs, tem_rhs;
4721
4722 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4723 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4724 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4725 tem_rhs);
4726
4727 if (tem && !CONSTANT_P (tem))
4728 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4729 }
4730 else
4731 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4732
4733 if (tem)
4734 {
4735 /* Reject "simplifications" that just wrap the two
4736 arguments in a CONST. Failure to do so can result
4737 in infinite recursion with simplify_binary_operation
4738 when it calls us to simplify CONST operations.
4739 Also, if we find such a simplification, don't try
4740 any more combinations with this rhs: We must have
4741 		     something like symbol+offset, i.e. one of the
4742 trivial CONST expressions we handle later. */
4743 if (GET_CODE (tem) == CONST
4744 && GET_CODE (XEXP (tem, 0)) == ncode
4745 && XEXP (XEXP (tem, 0), 0) == lhs
4746 && XEXP (XEXP (tem, 0), 1) == rhs)
4747 break;
4748 lneg &= rneg;
4749 if (GET_CODE (tem) == NEG)
4750 tem = XEXP (tem, 0), lneg = !lneg;
4751 if (CONST_INT_P (tem) && lneg)
4752 tem = neg_const_int (mode, tem), lneg = 0;
4753
4754 ops[i].op = tem;
4755 ops[i].neg = lneg;
4756 ops[j].op = NULL_RTX;
4757 changed = 1;
4758 canonicalized = 1;
4759 }
4760 }
4761 }
4762
4763 if (!changed)
4764 break;
4765
4766 /* Pack all the operands to the lower-numbered entries. */
4767 for (i = 0, j = 0; j < n_ops; j++)
4768 if (ops[j].op)
4769 {
4770 ops[i] = ops[j];
4771 i++;
4772 }
4773 n_ops = i;
4774 }
4775
4776 	 /* If nothing changed, check whether rematerialization of rtl
4777 	    instructions is still required.  */
4778 if (!canonicalized)
4779 {
4780 	     /* Perform rematerialization only if all operands are registers and
4781 		all operations are PLUS.  */
4782 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4783 around rs6000 and how it uses the CA register. See PR67145. */
4784 for (i = 0; i < n_ops; i++)
4785 if (ops[i].neg
4786 || !REG_P (ops[i].op)
4787 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4788 && fixed_regs[REGNO (ops[i].op)]
4789 && !global_regs[REGNO (ops[i].op)]
4790 && ops[i].op != frame_pointer_rtx
4791 && ops[i].op != arg_pointer_rtx
4792 && ops[i].op != stack_pointer_rtx))
4793 return NULL_RTX;
4794 goto gen_result;
4795 }
4796
4797 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4798 if (n_ops == 2
4799 && CONST_INT_P (ops[1].op)
4800 && CONSTANT_P (ops[0].op)
4801 && ops[0].neg)
4802 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4803
4804 /* We suppressed creation of trivial CONST expressions in the
4805 combination loop to avoid recursion. Create one manually now.
4806 The combination loop should have ensured that there is exactly
4807 one CONST_INT, and the sort will have ensured that it is last
4808 in the array and that any other constant will be next-to-last. */
4809
4810 if (n_ops > 1
4811 && CONST_INT_P (ops[n_ops - 1].op)
4812 && CONSTANT_P (ops[n_ops - 2].op))
4813 {
4814 rtx value = ops[n_ops - 1].op;
4815 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4816 value = neg_const_int (mode, value);
4817 if (CONST_INT_P (value))
4818 {
4819 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4820 INTVAL (value));
4821 n_ops--;
4822 }
4823 }
4824
4825 /* Put a non-negated operand first, if possible. */
4826
4827 for (i = 0; i < n_ops && ops[i].neg; i++)
4828 continue;
4829 if (i == n_ops)
4830 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4831 else if (i != 0)
4832 {
4833 tem = ops[0].op;
4834 ops[0] = ops[i];
4835 ops[i].op = tem;
4836 ops[i].neg = 1;
4837 }
4838
4839 /* Now make the result by performing the requested operations. */
4840 gen_result:
4841 result = ops[0].op;
4842 for (i = 1; i < n_ops; i++)
4843 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4844 mode, result, ops[i].op);
4845
4846 return result;
4847 }
4848
4849 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4850 static bool
4851 plus_minus_operand_p (const_rtx x)
4852 {
4853 return GET_CODE (x) == PLUS
4854 || GET_CODE (x) == MINUS
4855 || (GET_CODE (x) == CONST
4856 && GET_CODE (XEXP (x, 0)) == PLUS
4857 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4858 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4859 }
4860
4861 /* Like simplify_binary_operation except used for relational operators.
4862 	 MODE is the mode of the result.  If MODE is VOIDmode, the operands
4863 	 must not both be VOIDmode.
4864
4865 	 CMP_MODE specifies the mode in which the comparison is done, so it is
4866 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4867 the operands or, if both are VOIDmode, the operands are compared in
4868 "infinite precision". */
4869 rtx
4870 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4871 machine_mode cmp_mode, rtx op0, rtx op1)
4872 {
4873 rtx tem, trueop0, trueop1;
4874
4875 if (cmp_mode == VOIDmode)
4876 cmp_mode = GET_MODE (op0);
4877 if (cmp_mode == VOIDmode)
4878 cmp_mode = GET_MODE (op1);
4879
4880 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4881 if (tem)
4882 {
4883 if (SCALAR_FLOAT_MODE_P (mode))
4884 {
4885 if (tem == const0_rtx)
4886 return CONST0_RTX (mode);
4887 #ifdef FLOAT_STORE_FLAG_VALUE
4888 {
4889 REAL_VALUE_TYPE val;
4890 val = FLOAT_STORE_FLAG_VALUE (mode);
4891 return const_double_from_real_value (val, mode);
4892 }
4893 #else
4894 return NULL_RTX;
4895 #endif
4896 }
4897 if (VECTOR_MODE_P (mode))
4898 {
4899 if (tem == const0_rtx)
4900 return CONST0_RTX (mode);
4901 #ifdef VECTOR_STORE_FLAG_VALUE
4902 {
4903 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4904 if (val == NULL_RTX)
4905 return NULL_RTX;
4906 if (val == const1_rtx)
4907 return CONST1_RTX (mode);
4908
4909 return gen_const_vec_duplicate (mode, val);
4910 }
4911 #else
4912 return NULL_RTX;
4913 #endif
4914 }
4915
4916 return tem;
4917 }
4918
4919 /* For the following tests, ensure const0_rtx is op1. */
4920 if (swap_commutative_operands_p (op0, op1)
4921 || (op0 == const0_rtx && op1 != const0_rtx))
4922 std::swap (op0, op1), code = swap_condition (code);
4923
4924 /* If op0 is a compare, extract the comparison arguments from it. */
4925 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4926 return simplify_gen_relational (code, mode, VOIDmode,
4927 XEXP (op0, 0), XEXP (op0, 1));
4928
4929 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4930 || CC0_P (op0))
4931 return NULL_RTX;
4932
4933 trueop0 = avoid_constant_pool_reference (op0);
4934 trueop1 = avoid_constant_pool_reference (op1);
4935 return simplify_relational_operation_1 (code, mode, cmp_mode,
4936 trueop0, trueop1);
4937 }
4938
4939 /* This part of simplify_relational_operation is only used when CMP_MODE
4940 is not in class MODE_CC (i.e. it is a real comparison).
4941
4942 	 MODE is the mode of the result, while CMP_MODE specifies the mode in
4943 	 which the comparison is done, so it is the mode of the operands.  */
4944
4945 static rtx
4946 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4947 machine_mode cmp_mode, rtx op0, rtx op1)
4948 {
4949 enum rtx_code op0code = GET_CODE (op0);
4950
4951 if (op1 == const0_rtx && COMPARISON_P (op0))
4952 {
4953 /* If op0 is a comparison, extract the comparison arguments
4954 from it. */
4955 if (code == NE)
4956 {
4957 if (GET_MODE (op0) == mode)
4958 return simplify_rtx (op0);
4959 else
4960 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4961 XEXP (op0, 0), XEXP (op0, 1));
4962 }
4963 else if (code == EQ)
4964 {
4965 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4966 if (new_code != UNKNOWN)
4967 return simplify_gen_relational (new_code, mode, VOIDmode,
4968 XEXP (op0, 0), XEXP (op0, 1));
4969 }
4970 }
4971
4972 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4973 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4974 if ((code == LTU || code == GEU)
4975 && GET_CODE (op0) == PLUS
4976 && CONST_INT_P (XEXP (op0, 1))
4977 && (rtx_equal_p (op1, XEXP (op0, 0))
4978 || rtx_equal_p (op1, XEXP (op0, 1)))
4979 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4980 && XEXP (op0, 1) != const0_rtx)
4981 {
4982 rtx new_cmp
4983 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4984 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4985 cmp_mode, XEXP (op0, 0), new_cmp);
4986 }
4987
4988 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4989 transformed into (LTU a -C). */
4990 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4991 && CONST_INT_P (XEXP (op0, 1))
4992 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4993 && XEXP (op0, 1) != const0_rtx)
4994 {
4995 rtx new_cmp
4996 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4997 return simplify_gen_relational (LTU, mode, cmp_mode,
4998 XEXP (op0, 0), new_cmp);
4999 }
5000
5001 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5002 if ((code == LTU || code == GEU)
5003 && GET_CODE (op0) == PLUS
5004 && rtx_equal_p (op1, XEXP (op0, 1))
5005 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5006 && !rtx_equal_p (op1, XEXP (op0, 0)))
5007 return simplify_gen_relational (code, mode, cmp_mode, op0,
5008 copy_rtx (XEXP (op0, 0)));
5009
5010 if (op1 == const0_rtx)
5011 {
5012 /* Canonicalize (GTU x 0) as (NE x 0). */
5013 if (code == GTU)
5014 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5015 /* Canonicalize (LEU x 0) as (EQ x 0). */
5016 if (code == LEU)
5017 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5018 }
5019 else if (op1 == const1_rtx)
5020 {
5021 switch (code)
5022 {
5023 case GE:
5024 /* Canonicalize (GE x 1) as (GT x 0). */
5025 return simplify_gen_relational (GT, mode, cmp_mode,
5026 op0, const0_rtx);
5027 case GEU:
5028 /* Canonicalize (GEU x 1) as (NE x 0). */
5029 return simplify_gen_relational (NE, mode, cmp_mode,
5030 op0, const0_rtx);
5031 case LT:
5032 /* Canonicalize (LT x 1) as (LE x 0). */
5033 return simplify_gen_relational (LE, mode, cmp_mode,
5034 op0, const0_rtx);
5035 case LTU:
5036 /* Canonicalize (LTU x 1) as (EQ x 0). */
5037 return simplify_gen_relational (EQ, mode, cmp_mode,
5038 op0, const0_rtx);
5039 default:
5040 break;
5041 }
5042 }
5043 else if (op1 == constm1_rtx)
5044 {
5045 /* Canonicalize (LE x -1) as (LT x 0). */
5046 if (code == LE)
5047 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5048 /* Canonicalize (GT x -1) as (GE x 0). */
5049 if (code == GT)
5050 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5051 }
5052
5053 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5054 if ((code == EQ || code == NE)
5055 && (op0code == PLUS || op0code == MINUS)
5056 && CONSTANT_P (op1)
5057 && CONSTANT_P (XEXP (op0, 1))
5058 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5059 {
5060 rtx x = XEXP (op0, 0);
5061 rtx c = XEXP (op0, 1);
5062 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5063 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5064
5065 /* Detect an infinite recursive condition, where we oscillate at this
5066 simplification case between:
5067 A + B == C <---> C - B == A,
5068 where A, B, and C are all constants with non-simplifiable expressions,
5069 usually SYMBOL_REFs. */
5070 if (GET_CODE (tem) == invcode
5071 && CONSTANT_P (x)
5072 && rtx_equal_p (c, XEXP (tem, 1)))
5073 return NULL_RTX;
5074
5075 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5076 }
5077
5078 	 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5079 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5080 scalar_int_mode int_mode, int_cmp_mode;
5081 if (code == NE
5082 && op1 == const0_rtx
5083 && is_int_mode (mode, &int_mode)
5084 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5085 /* ??? Work-around BImode bugs in the ia64 backend. */
5086 && int_mode != BImode
5087 && int_cmp_mode != BImode
5088 && nonzero_bits (op0, int_cmp_mode) == 1
5089 && STORE_FLAG_VALUE == 1)
5090 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5091 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5092 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5093
5094 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5095 if ((code == EQ || code == NE)
5096 && op1 == const0_rtx
5097 && op0code == XOR)
5098 return simplify_gen_relational (code, mode, cmp_mode,
5099 XEXP (op0, 0), XEXP (op0, 1));
5100
5101 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5102 if ((code == EQ || code == NE)
5103 && op0code == XOR
5104 && rtx_equal_p (XEXP (op0, 0), op1)
5105 && !side_effects_p (XEXP (op0, 0)))
5106 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5107 CONST0_RTX (mode));
5108
5109 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5110 if ((code == EQ || code == NE)
5111 && op0code == XOR
5112 && rtx_equal_p (XEXP (op0, 1), op1)
5113 && !side_effects_p (XEXP (op0, 1)))
5114 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5115 CONST0_RTX (mode));
5116
5117 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5118 if ((code == EQ || code == NE)
5119 && op0code == XOR
5120 && CONST_SCALAR_INT_P (op1)
5121 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5122 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5123 simplify_gen_binary (XOR, cmp_mode,
5124 XEXP (op0, 1), op1));
5125
5126 	 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction,
5127 	    or by constant folding if x/y is a constant.  */
5128 if ((code == EQ || code == NE)
5129 && (op0code == AND || op0code == IOR)
5130 && !side_effects_p (op1)
5131 && op1 != CONST0_RTX (cmp_mode))
5132 {
5133 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5134 (eq/ne (and (not y) x) 0). */
5135 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5136 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5137 {
5138 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5139 cmp_mode);
5140 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5141
5142 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5143 CONST0_RTX (cmp_mode));
5144 }
5145
5146 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5147 (eq/ne (and (not x) y) 0). */
5148 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5149 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5150 {
5151 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5152 cmp_mode);
5153 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5154
5155 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5156 CONST0_RTX (cmp_mode));
5157 }
5158 }
5159
5160 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5161 if ((code == EQ || code == NE)
5162 && GET_CODE (op0) == BSWAP
5163 && CONST_SCALAR_INT_P (op1))
5164 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5165 simplify_gen_unary (BSWAP, cmp_mode,
5166 op1, cmp_mode));
5167
5168 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5169 if ((code == EQ || code == NE)
5170 && GET_CODE (op0) == BSWAP
5171 && GET_CODE (op1) == BSWAP)
5172 return simplify_gen_relational (code, mode, cmp_mode,
5173 XEXP (op0, 0), XEXP (op1, 0));
5174
5175 if (op0code == POPCOUNT && op1 == const0_rtx)
5176 switch (code)
5177 {
5178 case EQ:
5179 case LE:
5180 case LEU:
5181 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5182 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5183 XEXP (op0, 0), const0_rtx);
5184
5185 case NE:
5186 case GT:
5187 case GTU:
5188 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5189 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5190 XEXP (op0, 0), const0_rtx);
5191
5192 default:
5193 break;
5194 }
5195
5196 return NULL_RTX;
5197 }
5198
5199 enum
5200 {
5201 CMP_EQ = 1,
5202 CMP_LT = 2,
5203 CMP_GT = 4,
5204 CMP_LTU = 8,
5205 CMP_GTU = 16
5206 };
5207
5208
5209 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5210 	 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5211 	 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5212 	 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5213 For floating-point comparisons, assume that the operands were ordered. */
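 /* For instance, comparison_result (LE, CMP_LT | CMP_LTU) is const_true_rtx,
    since LE only requires CMP_GT to be absent, while
    comparison_result (GEU, CMP_LT | CMP_LTU) is const0_rtx.  */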
5214
5215 static rtx
5216 comparison_result (enum rtx_code code, int known_results)
5217 {
5218 switch (code)
5219 {
5220 case EQ:
5221 case UNEQ:
5222 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5223 case NE:
5224 case LTGT:
5225 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5226
5227 case LT:
5228 case UNLT:
5229 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5230 case GE:
5231 case UNGE:
5232 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5233
5234 case GT:
5235 case UNGT:
5236 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5237 case LE:
5238 case UNLE:
5239 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5240
5241 case LTU:
5242 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5243 case GEU:
5244 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5245
5246 case GTU:
5247 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5248 case LEU:
5249 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5250
5251 case ORDERED:
5252 return const_true_rtx;
5253 case UNORDERED:
5254 return const0_rtx;
5255 default:
5256 gcc_unreachable ();
5257 }
5258 }
5259
5260 /* Check if the given comparison (done in the given MODE) is actually
5261 	 a tautology or a contradiction. If the mode is VOIDmode, the
5262 comparison is done in "infinite precision". If no simplification
5263 is possible, this function returns zero. Otherwise, it returns
5264 either const_true_rtx or const0_rtx. */
5265
5266 rtx
5267 simplify_const_relational_operation (enum rtx_code code,
5268 machine_mode mode,
5269 rtx op0, rtx op1)
5270 {
5271 rtx tem;
5272 rtx trueop0;
5273 rtx trueop1;
5274
5275 gcc_assert (mode != VOIDmode
5276 || (GET_MODE (op0) == VOIDmode
5277 && GET_MODE (op1) == VOIDmode));
5278
5279 /* If op0 is a compare, extract the comparison arguments from it. */
5280 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5281 {
5282 op1 = XEXP (op0, 1);
5283 op0 = XEXP (op0, 0);
5284
5285 if (GET_MODE (op0) != VOIDmode)
5286 mode = GET_MODE (op0);
5287 else if (GET_MODE (op1) != VOIDmode)
5288 mode = GET_MODE (op1);
5289 else
5290 return 0;
5291 }
5292
5293 /* We can't simplify MODE_CC values since we don't know what the
5294 actual comparison is. */
5295 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5296 return 0;
5297
5298 /* Make sure the constant is second. */
5299 if (swap_commutative_operands_p (op0, op1))
5300 {
5301 std::swap (op0, op1);
5302 code = swap_condition (code);
5303 }
5304
5305 trueop0 = avoid_constant_pool_reference (op0);
5306 trueop1 = avoid_constant_pool_reference (op1);
5307
5308 /* For integer comparisons of A and B maybe we can simplify A - B and can
5309 then simplify a comparison of that with zero. If A and B are both either
5310 a register or a CONST_INT, this can't help; testing for these cases will
5311 prevent infinite recursion here and speed things up.
5312
5313 We can only do this for EQ and NE comparisons as otherwise we may
5314 lose or introduce overflow which we cannot disregard as undefined as
5315 we do not know the signedness of the operation on either the left or
5316 the right hand side of the comparison. */
5317
5318 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5319 && (code == EQ || code == NE)
5320 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5321 && (REG_P (op1) || CONST_INT_P (trueop1)))
5322 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5323 /* We cannot do this if tem is a nonzero address. */
5324 && ! nonzero_address_p (tem))
5325 return simplify_const_relational_operation (signed_condition (code),
5326 mode, tem, const0_rtx);
5327
5328 if (! HONOR_NANS (mode) && code == ORDERED)
5329 return const_true_rtx;
5330
5331 if (! HONOR_NANS (mode) && code == UNORDERED)
5332 return const0_rtx;
5333
5334 /* For modes without NaNs, if the two operands are equal, we know the
5335 result except if they have side-effects. Even with NaNs we know
5336 the result of unordered comparisons and, if signaling NaNs are
5337 irrelevant, also the result of LT/GT/LTGT. */
5338 if ((! HONOR_NANS (trueop0)
5339 || code == UNEQ || code == UNLE || code == UNGE
5340 || ((code == LT || code == GT || code == LTGT)
5341 && ! HONOR_SNANS (trueop0)))
5342 && rtx_equal_p (trueop0, trueop1)
5343 && ! side_effects_p (trueop0))
5344 return comparison_result (code, CMP_EQ);
5345
5346 /* If the operands are floating-point constants, see if we can fold
5347 the result. */
5348 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5349 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5350 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5351 {
5352 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5353 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5354
5355 /* Comparisons are unordered iff at least one of the values is NaN. */
5356 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5357 switch (code)
5358 {
5359 case UNEQ:
5360 case UNLT:
5361 case UNGT:
5362 case UNLE:
5363 case UNGE:
5364 case NE:
5365 case UNORDERED:
5366 return const_true_rtx;
5367 case EQ:
5368 case LT:
5369 case GT:
5370 case LE:
5371 case GE:
5372 case LTGT:
5373 case ORDERED:
5374 return const0_rtx;
5375 default:
5376 return 0;
5377 }
5378
5379 return comparison_result (code,
5380 (real_equal (d0, d1) ? CMP_EQ :
5381 real_less (d0, d1) ? CMP_LT : CMP_GT));
5382 }
5383
5384 /* Otherwise, see if the operands are both integers. */
5385 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5386 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5387 {
5388 /* It would be nice if we really had a mode here. However, the
5389 largest int representable on the target is as good as
5390 infinite. */
5391 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5392 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5393 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5394
5395 if (wi::eq_p (ptrueop0, ptrueop1))
5396 return comparison_result (code, CMP_EQ);
5397 else
5398 {
5399 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5400 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5401 return comparison_result (code, cr);
5402 }
5403 }
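
  /* A small worked case: for trueop0 = -1 and trueop1 = 1 the mask
     built above is CMP_LT | CMP_GTU, so (lt -1 1) folds to
     const_true_rtx while (ltu -1 1) folds to const0_rtx, -1 being the
     largest value when read as unsigned.  */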
5404
5405 /* Optimize comparisons with upper and lower bounds. */
5406 scalar_int_mode int_mode;
5407 if (CONST_INT_P (trueop1)
5408 && is_a <scalar_int_mode> (mode, &int_mode)
5409 && HWI_COMPUTABLE_MODE_P (int_mode)
5410 && !side_effects_p (trueop0))
5411 {
5412 int sign;
5413 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5414 HOST_WIDE_INT val = INTVAL (trueop1);
5415 HOST_WIDE_INT mmin, mmax;
5416
5417 if (code == GEU
5418 || code == LEU
5419 || code == GTU
5420 || code == LTU)
5421 sign = 0;
5422 else
5423 sign = 1;
5424
5425 /* Get a reduced range if the sign bit is zero. */
5426 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5427 {
5428 mmin = 0;
5429 mmax = nonzero;
5430 }
5431 else
5432 {
5433 rtx mmin_rtx, mmax_rtx;
5434 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5435
5436 mmin = INTVAL (mmin_rtx);
5437 mmax = INTVAL (mmax_rtx);
5438 if (sign)
5439 {
5440 unsigned int sign_copies
5441 = num_sign_bit_copies (trueop0, int_mode);
5442
5443 mmin >>= (sign_copies - 1);
5444 mmax >>= (sign_copies - 1);
5445 }
5446 }
5447
5448 switch (code)
5449 {
5450 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5451 case GEU:
5452 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5453 return const_true_rtx;
5454 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5455 return const0_rtx;
5456 break;
5457 case GE:
5458 if (val <= mmin)
5459 return const_true_rtx;
5460 if (val > mmax)
5461 return const0_rtx;
5462 break;
5463
5464 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5465 case LEU:
5466 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5467 return const_true_rtx;
5468 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5469 return const0_rtx;
5470 break;
5471 case LE:
5472 if (val >= mmax)
5473 return const_true_rtx;
5474 if (val < mmin)
5475 return const0_rtx;
5476 break;
5477
5478 case EQ:
5479 /* x == y is always false for y out of range. */
5480 if (val < mmin || val > mmax)
5481 return const0_rtx;
5482 break;
5483
5484 /* x > y is always false for y >= mmax, always true for y < mmin. */
5485 case GTU:
5486 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5487 return const0_rtx;
5488 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5489 return const_true_rtx;
5490 break;
5491 case GT:
5492 if (val >= mmax)
5493 return const0_rtx;
5494 if (val < mmin)
5495 return const_true_rtx;
5496 break;
5497
5498 /* x < y is always false for y <= mmin, always true for y > mmax. */
5499 case LTU:
5500 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5501 return const0_rtx;
5502 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5503 return const_true_rtx;
5504 break;
5505 case LT:
5506 if (val <= mmin)
5507 return const0_rtx;
5508 if (val > mmax)
5509 return const_true_rtx;
5510 break;
5511
5512 case NE:
5513 /* x != y is always true for y out of range. */
5514 if (val < mmin || val > mmax)
5515 return const_true_rtx;
5516 break;
5517
5518 default:
5519 break;
5520 }
5521 }
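
  /* A sketch of the bounds logic above: if nonzero_bits proves that
     trueop0 fits in [0, 255] (in a mode wider than 8 bits), then
     (gtu trueop0 (const_int 255)) is known false and
     (leu trueop0 (const_int 255)) is known true, with no further
     knowledge of trueop0 required.  */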
5522
5523 /* Optimize integer comparisons with zero. */
5524 if (is_a <scalar_int_mode> (mode, &int_mode)
5525 && trueop1 == const0_rtx
5526 && !side_effects_p (trueop0))
5527 {
5528 /* Some addresses are known to be nonzero. We don't know
5529 their sign, but equality comparisons are known. */
5530 if (nonzero_address_p (trueop0))
5531 {
5532 if (code == EQ || code == LEU)
5533 return const0_rtx;
5534 if (code == NE || code == GTU)
5535 return const_true_rtx;
5536 }
5537
5538 /* See if the first operand is an IOR with a constant. If so, we
5539 may be able to determine the result of this comparison. */
5540 if (GET_CODE (op0) == IOR)
5541 {
5542 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5543 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5544 {
5545 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5546 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5547 && (UINTVAL (inner_const)
5548 & (HOST_WIDE_INT_1U
5549 << sign_bitnum)));
5550
5551 switch (code)
5552 {
5553 case EQ:
5554 case LEU:
5555 return const0_rtx;
5556 case NE:
5557 case GTU:
5558 return const_true_rtx;
5559 case LT:
5560 case LE:
5561 if (has_sign)
5562 return const_true_rtx;
5563 break;
5564 case GT:
5565 case GE:
5566 if (has_sign)
5567 return const0_rtx;
5568 break;
5569 default:
5570 break;
5571 }
5572 }
5573 }
5574 }
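
  /* For instance, (ne (ior X (const_int 4)) (const_int 0)) folds to
     const_true_rtx above: IOR with a nonzero constant can never yield
     zero.  */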
5575
5576 /* Optimize comparison of ABS with zero. */
5577 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5578 && (GET_CODE (trueop0) == ABS
5579 || (GET_CODE (trueop0) == FLOAT_EXTEND
5580 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5581 {
5582 switch (code)
5583 {
5584 case LT:
5585 /* Optimize abs(x) < 0.0. */
5586 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5587 return const0_rtx;
5588 break;
5589
5590 case GE:
5591 /* Optimize abs(x) >= 0.0. */
5592 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5593 return const_true_rtx;
5594 break;
5595
5596 case UNGE:
5597 /* Optimize ! (abs(x) < 0.0). */
5598 return const_true_rtx;
5599
5600 default:
5601 break;
5602 }
5603 }
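
  /* E.g. (ge (abs:SF X) (const_double 0.0)) folds to const_true_rtx
     when NaNs need not be honored, since an absolute value is never
     negative; with NaNs the comparison could be unordered, so it must
     be left alone.  */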
5604
5605 return 0;
5606 }
5607
5608 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5609 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5610 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5611 can be simplified to that or NULL_RTX if not.
5612 Assume X is compared against zero with CMP_CODE and the true
5613 arm is TRUE_VAL and the false arm is FALSE_VAL. */
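
/* For example, on a hypothetical target where CLZ_DEFINED_VALUE_AT_ZERO
   sets the value to 32 for SImode, (x == 0 ? 32 : clz (x)) simplifies
   to plain clz (x), because both arms agree at x == 0.  */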
5614
5615 static rtx
5616 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5617 {
5618 if (cmp_code != EQ && cmp_code != NE)
5619 return NULL_RTX;
5620
5621 /* Result on X == 0 and X != 0 respectively. */
5622 rtx on_zero, on_nonzero;
5623 if (cmp_code == EQ)
5624 {
5625 on_zero = true_val;
5626 on_nonzero = false_val;
5627 }
5628 else
5629 {
5630 on_zero = false_val;
5631 on_nonzero = true_val;
5632 }
5633
5634 rtx_code op_code = GET_CODE (on_nonzero);
5635 if ((op_code != CLZ && op_code != CTZ)
5636 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5637 || !CONST_INT_P (on_zero))
5638 return NULL_RTX;
5639
5640 HOST_WIDE_INT op_val;
5641 scalar_int_mode mode ATTRIBUTE_UNUSED
5642 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5643 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5644 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5645 && op_val == INTVAL (on_zero))
5646 return on_nonzero;
5647
5648 return NULL_RTX;
5649 }
5650
5651 /* Try to simplify X given that it appears within operand OP of a
5652 VEC_MERGE operation whose mask is MASK. X need not use the same
5653 vector mode as the VEC_MERGE, but it must have the same number of
5654 elements.
5655
5656 Return the simplified X on success, otherwise return NULL_RTX. */
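
/* For instance, with X = (neg (vec_merge A B MASK)) and OP = 0, this
   recurses through the NEG and returns (neg A): within operand 0 of the
   outer VEC_MERGE only the lanes selected by MASK matter, and in those
   lanes X is just (neg A).  */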
5657
5658 rtx
5659 simplify_merge_mask (rtx x, rtx mask, int op)
5660 {
5661 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5662 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5663 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5664 {
5665 if (side_effects_p (XEXP (x, 1 - op)))
5666 return NULL_RTX;
5667
5668 return XEXP (x, op);
5669 }
5670 if (UNARY_P (x)
5671 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5672 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
5673 {
5674 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5675 if (top0)
5676 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
5677 GET_MODE (XEXP (x, 0)));
5678 }
5679 if (BINARY_P (x)
5680 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5681 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5682 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5683 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
5684 {
5685 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5686 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5687 if (top0 || top1)
5688 {
5689 if (COMPARISON_P (x))
5690 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
5691 GET_MODE (XEXP (x, 0)) != VOIDmode
5692 ? GET_MODE (XEXP (x, 0))
5693 : GET_MODE (XEXP (x, 1)),
5694 top0 ? top0 : XEXP (x, 0),
5695 top1 ? top1 : XEXP (x, 1));
5696 else
5697 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
5698 top0 ? top0 : XEXP (x, 0),
5699 top1 ? top1 : XEXP (x, 1));
5700 }
5701 }
5702 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
5703 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5704 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5705 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5706 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
5707 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
5708 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
5709 {
5710 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5711 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5712 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
5713 if (top0 || top1 || top2)
5714 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
5715 GET_MODE (XEXP (x, 0)),
5716 top0 ? top0 : XEXP (x, 0),
5717 top1 ? top1 : XEXP (x, 1),
5718 top2 ? top2 : XEXP (x, 2));
5719 }
5720 return NULL_RTX;
5721 }
5722
5723 \f
5724 /* Simplify CODE, an operation with result mode MODE and three operands,
5725 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5726 a constant. Return 0 if no simplification is possible. */
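
/* A small example of the FMA case below: (fma (neg A) (neg B) C)
   simplifies to (fma A B C), the two negations of the multiplication
   operands cancelling each other.  */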
5727
5728 rtx
5729 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5730 machine_mode op0_mode, rtx op0, rtx op1,
5731 rtx op2)
5732 {
5733 bool any_change = false;
5734 rtx tem, trueop2;
5735 scalar_int_mode int_mode, int_op0_mode;
5736 unsigned int n_elts;
5737
5738 switch (code)
5739 {
5740 case FMA:
5741 /* Simplify negations around the multiplication. */
5742 /* -a * -b + c => a * b + c. */
5743 if (GET_CODE (op0) == NEG)
5744 {
5745 tem = simplify_unary_operation (NEG, mode, op1, mode);
5746 if (tem)
5747 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5748 }
5749 else if (GET_CODE (op1) == NEG)
5750 {
5751 tem = simplify_unary_operation (NEG, mode, op0, mode);
5752 if (tem)
5753 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5754 }
5755
5756 /* Canonicalize the two multiplication operands. */
5757 /* a * -b + c => -b * a + c. */
5758 if (swap_commutative_operands_p (op0, op1))
5759 std::swap (op0, op1), any_change = true;
5760
5761 if (any_change)
5762 return gen_rtx_FMA (mode, op0, op1, op2);
5763 return NULL_RTX;
5764
5765 case SIGN_EXTRACT:
5766 case ZERO_EXTRACT:
5767 if (CONST_INT_P (op0)
5768 && CONST_INT_P (op1)
5769 && CONST_INT_P (op2)
5770 && is_a <scalar_int_mode> (mode, &int_mode)
5771 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5772 && HWI_COMPUTABLE_MODE_P (int_mode))
5773 {
5774 /* Extracting a bit-field from a constant. */
5775 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5776 HOST_WIDE_INT op1val = INTVAL (op1);
5777 HOST_WIDE_INT op2val = INTVAL (op2);
5778 if (!BITS_BIG_ENDIAN)
5779 val >>= op2val;
5780 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5781 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5782 else
5783 /* Not enough information to calculate the bit position. */
5784 break;
5785
5786 if (HOST_BITS_PER_WIDE_INT != op1val)
5787 {
5788 /* First zero-extend. */
5789 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5790 /* If desired, propagate sign bit. */
5791 if (code == SIGN_EXTRACT
5792 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5793 != 0)
5794 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5795 }
5796
5797 return gen_int_mode (val, int_mode);
5798 }
5799 break;
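
      /* A worked instance of the extraction above, assuming
         !BITS_BIG_ENDIAN: extracting 8 bits at bit position 4 from
         (const_int 0xabc) shifts right by 4 to get 0xab and masks to
         8 bits, so the ZERO_EXTRACT folds to (const_int 0xab).  */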
5800
5801 case IF_THEN_ELSE:
5802 if (CONST_INT_P (op0))
5803 return op0 != const0_rtx ? op1 : op2;
5804
5805 /* Convert c ? a : a into "a". */
5806 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5807 return op1;
5808
5809 /* Convert a != b ? a : b into "a". */
5810 if (GET_CODE (op0) == NE
5811 && ! side_effects_p (op0)
5812 && ! HONOR_NANS (mode)
5813 && ! HONOR_SIGNED_ZEROS (mode)
5814 && ((rtx_equal_p (XEXP (op0, 0), op1)
5815 && rtx_equal_p (XEXP (op0, 1), op2))
5816 || (rtx_equal_p (XEXP (op0, 0), op2)
5817 && rtx_equal_p (XEXP (op0, 1), op1))))
5818 return op1;
5819
5820 /* Convert a == b ? a : b into "b". */
5821 if (GET_CODE (op0) == EQ
5822 && ! side_effects_p (op0)
5823 && ! HONOR_NANS (mode)
5824 && ! HONOR_SIGNED_ZEROS (mode)
5825 && ((rtx_equal_p (XEXP (op0, 0), op1)
5826 && rtx_equal_p (XEXP (op0, 1), op2))
5827 || (rtx_equal_p (XEXP (op0, 0), op2)
5828 && rtx_equal_p (XEXP (op0, 1), op1))))
5829 return op2;
5830
5831 /* Convert (!c) != {0,...,0} ? a : b into
5832 c != {0,...,0} ? b : a for vector modes. */
5833 if (VECTOR_MODE_P (GET_MODE (op1))
5834 && GET_CODE (op0) == NE
5835 && GET_CODE (XEXP (op0, 0)) == NOT
5836 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5837 {
5838 rtx cv = XEXP (op0, 1);
5839 int nunits;
5840 bool ok = true;
5841 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5842 ok = false;
5843 else
5844 for (int i = 0; i < nunits; ++i)
5845 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5846 {
5847 ok = false;
5848 break;
5849 }
5850 if (ok)
5851 {
5852 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5853 XEXP (XEXP (op0, 0), 0),
5854 XEXP (op0, 1));
5855 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5856 return retval;
5857 }
5858 }
5859
5860 /* Convert x == 0 ? N : clz (x) into clz (x) when
5861 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5862 Similarly for ctz (x). */
5863 if (COMPARISON_P (op0) && !side_effects_p (op0)
5864 && XEXP (op0, 1) == const0_rtx)
5865 {
5866 rtx simplified
5867 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5868 op1, op2);
5869 if (simplified)
5870 return simplified;
5871 }
5872
5873 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5874 {
5875 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5876 ? GET_MODE (XEXP (op0, 1))
5877 : GET_MODE (XEXP (op0, 0)));
5878 rtx temp;
5879
5880 /* Look for happy constants in op1 and op2. */
5881 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5882 {
5883 HOST_WIDE_INT t = INTVAL (op1);
5884 HOST_WIDE_INT f = INTVAL (op2);
5885
5886 if (t == STORE_FLAG_VALUE && f == 0)
5887 code = GET_CODE (op0);
5888 else if (t == 0 && f == STORE_FLAG_VALUE)
5889 {
5890 enum rtx_code tmp;
5891 tmp = reversed_comparison_code (op0, NULL);
5892 if (tmp == UNKNOWN)
5893 break;
5894 code = tmp;
5895 }
5896 else
5897 break;
5898
5899 return simplify_gen_relational (code, mode, cmp_mode,
5900 XEXP (op0, 0), XEXP (op0, 1));
5901 }
5902
5903 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5904 cmp_mode, XEXP (op0, 0),
5905 XEXP (op0, 1));
5906
5907 /* See if any simplifications were possible. */
5908 if (temp)
5909 {
5910 if (CONST_INT_P (temp))
5911 return temp == const0_rtx ? op2 : op1;
5912 else
5913 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5914 }
5915 }
5916 break;
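
      /* E.g. on a target with STORE_FLAG_VALUE == 1, the code above
         collapses (if_then_else (eq X Y) (const_int 1) (const_int 0))
         to just (eq X Y), and the swapped-arm form to the reversed
         comparison (ne X Y) when one exists.  */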
5917
5918 case VEC_MERGE:
5919 gcc_assert (GET_MODE (op0) == mode);
5920 gcc_assert (GET_MODE (op1) == mode);
5921 gcc_assert (VECTOR_MODE_P (mode));
5922 trueop2 = avoid_constant_pool_reference (op2);
5923 if (CONST_INT_P (trueop2)
5924 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5925 {
5926 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5927 unsigned HOST_WIDE_INT mask;
5928 if (n_elts == HOST_BITS_PER_WIDE_INT)
5929 mask = -1;
5930 else
5931 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5932
5933 if (!(sel & mask) && !side_effects_p (op0))
5934 return op1;
5935 if ((sel & mask) == mask && !side_effects_p (op1))
5936 return op0;
5937
5938 rtx trueop0 = avoid_constant_pool_reference (op0);
5939 rtx trueop1 = avoid_constant_pool_reference (op1);
5940 if (GET_CODE (trueop0) == CONST_VECTOR
5941 && GET_CODE (trueop1) == CONST_VECTOR)
5942 {
5943 rtvec v = rtvec_alloc (n_elts);
5944 unsigned int i;
5945
5946 for (i = 0; i < n_elts; i++)
5947 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5948 ? CONST_VECTOR_ELT (trueop0, i)
5949 : CONST_VECTOR_ELT (trueop1, i));
5950 return gen_rtx_CONST_VECTOR (mode, v);
5951 }
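
	  /* For example, the constant fold just above turns
	     (vec_merge (const_vector [A0 A1]) (const_vector [B0 B1])
	     (const_int 1)) into (const_vector [A0 B1]): bit I of the
	     selector picks element I from the first operand, otherwise
	     from the second.  */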
5952
5953 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5954 if no element from a appears in the result. */
5955 if (GET_CODE (op0) == VEC_MERGE)
5956 {
5957 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5958 if (CONST_INT_P (tem))
5959 {
5960 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5961 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5962 return simplify_gen_ternary (code, mode, mode,
5963 XEXP (op0, 1), op1, op2);
5964 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5965 return simplify_gen_ternary (code, mode, mode,
5966 XEXP (op0, 0), op1, op2);
5967 }
5968 }
5969 if (GET_CODE (op1) == VEC_MERGE)
5970 {
5971 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5972 if (CONST_INT_P (tem))
5973 {
5974 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5975 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5976 return simplify_gen_ternary (code, mode, mode,
5977 op0, XEXP (op1, 1), op2);
5978 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5979 return simplify_gen_ternary (code, mode, mode,
5980 op0, XEXP (op1, 0), op2);
5981 }
5982 }
5983
5984 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5985 with a. */
5986 if (GET_CODE (op0) == VEC_DUPLICATE
5987 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5988 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5989 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5990 {
5991 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5992 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5993 {
5994 if (XEXP (XEXP (op0, 0), 0) == op1
5995 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5996 return op1;
5997 }
5998 }
5999 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6000 (const_int N))
6001 with (vec_concat (X) (B)) if N == 1 or
6002 (vec_concat (A) (X)) if N == 2. */
6003 if (GET_CODE (op0) == VEC_DUPLICATE
6004 && GET_CODE (op1) == CONST_VECTOR
6005 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6006 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6007 && IN_RANGE (sel, 1, 2))
6008 {
6009 rtx newop0 = XEXP (op0, 0);
6010 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6011 if (sel == 2)
6012 std::swap (newop0, newop1);
6013 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6014 }
6015 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6016 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6017 Only applies for vectors of two elements. */
6018 if (GET_CODE (op0) == VEC_DUPLICATE
6019 && GET_CODE (op1) == VEC_CONCAT
6020 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6021 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6022 && IN_RANGE (sel, 1, 2))
6023 {
6024 rtx newop0 = XEXP (op0, 0);
6025 rtx newop1 = XEXP (op1, 2 - sel);
6026 rtx otherop = XEXP (op1, sel - 1);
6027 if (sel == 2)
6028 std::swap (newop0, newop1);
6029 /* Don't want to throw away the other part of the vec_concat if
6030 it has side-effects. */
6031 if (!side_effects_p (otherop))
6032 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6033 }
6034
6035 /* Replace:
6036
6037 (vec_merge:outer (vec_duplicate:outer x:inner)
6038 (subreg:outer y:inner 0)
6039 (const_int N))
6040
6041 with (vec_concat:outer x:inner y:inner) if N == 1,
6042 or (vec_concat:outer y:inner x:inner) if N == 2.
6043
6044 Implicitly, this means we have a paradoxical subreg, but such
6045 a check is cheap, so make it anyway.
6046
6047 Only applies for vectors of two elements. */
6048 if (GET_CODE (op0) == VEC_DUPLICATE
6049 && GET_CODE (op1) == SUBREG
6050 && GET_MODE (op1) == GET_MODE (op0)
6051 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6052 && paradoxical_subreg_p (op1)
6053 && subreg_lowpart_p (op1)
6054 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6055 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6056 && IN_RANGE (sel, 1, 2))
6057 {
6058 rtx newop0 = XEXP (op0, 0);
6059 rtx newop1 = SUBREG_REG (op1);
6060 if (sel == 2)
6061 std::swap (newop0, newop1);
6062 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6063 }
6064
6065 /* Same as above but with switched operands:
6066 Replace (vec_merge:outer (subreg:outer x:inner 0)
6067 (vec_duplicate:outer y:inner)
6068 (const_int N))
6069
6070 with (vec_concat:outer x:inner y:inner) if N == 1,
6071 or (vec_concat:outer y:inner x:inner) if N == 2. */
6072 if (GET_CODE (op1) == VEC_DUPLICATE
6073 && GET_CODE (op0) == SUBREG
6074 && GET_MODE (op0) == GET_MODE (op1)
6075 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6076 && paradoxical_subreg_p (op0)
6077 && subreg_lowpart_p (op0)
6078 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6079 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6080 && IN_RANGE (sel, 1, 2))
6081 {
6082 rtx newop0 = SUBREG_REG (op0);
6083 rtx newop1 = XEXP (op1, 0);
6084 if (sel == 2)
6085 std::swap (newop0, newop1);
6086 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6087 }
6088
6089 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6090 (const_int n))
6091 with (vec_concat x y) or (vec_concat y x) depending on value
6092 of N. */
6093 if (GET_CODE (op0) == VEC_DUPLICATE
6094 && GET_CODE (op1) == VEC_DUPLICATE
6095 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6096 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6097 && IN_RANGE (sel, 1, 2))
6098 {
6099 rtx newop0 = XEXP (op0, 0);
6100 rtx newop1 = XEXP (op1, 0);
6101 if (sel == 2)
6102 std::swap (newop0, newop1);
6103
6104 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6105 }
6106 }
6107
6108 if (rtx_equal_p (op0, op1)
6109 && !side_effects_p (op2) && !side_effects_p (op1))
6110 return op0;
6111
6112 if (!side_effects_p (op2))
6113 {
6114 rtx top0
6115 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6116 rtx top1
6117 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6118 if (top0 || top1)
6119 return simplify_gen_ternary (code, mode, mode,
6120 top0 ? top0 : op0,
6121 top1 ? top1 : op1, op2);
6122 }
6123
6124 break;
6125
6126 default:
6127 gcc_unreachable ();
6128 }
6129
6130 return 0;
6131 }
6132
6133 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
6134 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
6135 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
6136
6137 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
6138 represented as a little-endian array of 'unsigned char', selecting by BYTE,
6139 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
6140 FIRST_ELEM is the number of the first element to extract, otherwise
6141 FIRST_ELEM is ignored. */
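
/* A little-endian sketch: a QImode subreg at BYTE 1 of
   (const_int 0x1234) unpacks the constant into the byte array
   [0x34, 0x12, ...], selects byte 1 and repacks it, giving
   (const_int 0x12).  */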
6142
6143 static rtx
6144 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
6145 machine_mode innermode, unsigned int byte,
6146 unsigned int first_elem, unsigned int inner_bytes)
6147 {
6148 enum {
6149 value_bit = 8,
6150 value_mask = (1 << value_bit) - 1
6151 };
6152 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
6153 int value_start;
6154 int i;
6155 int elem;
6156
6157 int num_elem;
6158 rtx * elems;
6159 int elem_bitsize;
6160 rtx result_s = NULL;
6161 rtvec result_v = NULL;
6162 enum mode_class outer_class;
6163 scalar_mode outer_submode;
6164 int max_bitsize;
6165
6166 /* Some ports misuse CCmode. */
6167 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6168 return op;
6169
6170 /* We have no way to represent a complex constant at the rtl level. */
6171 if (COMPLEX_MODE_P (outermode))
6172 return NULL_RTX;
6173
6174 /* We support any size mode. */
6175 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6176 inner_bytes * BITS_PER_UNIT);
6177
6178 /* Unpack the value. */
6179
6180 if (GET_CODE (op) == CONST_VECTOR)
6181 {
6182 num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
6183 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6184 }
6185 else
6186 {
6187 num_elem = 1;
6188 elem_bitsize = max_bitsize;
6189 }
6190 /* If this asserts, it is too complicated; reducing value_bit may help. */
6191 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6192 /* I don't know how to handle endianness of sub-units. */
6193 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6194
6195 for (elem = 0; elem < num_elem; elem++)
6196 {
6197 unsigned char * vp;
6198 rtx el = (GET_CODE (op) == CONST_VECTOR
6199 ? CONST_VECTOR_ELT (op, first_elem + elem)
6200 : op);
6201
6202 /* Vectors are kept in target memory order. (This is probably
6203 a mistake.) */
6204 {
6205 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6206 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6207 / BITS_PER_UNIT);
6208 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6209 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6210 unsigned bytele = (subword_byte % UNITS_PER_WORD
6211 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6212 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6213 }
6214
6215 switch (GET_CODE (el))
6216 {
6217 case CONST_INT:
6218 for (i = 0;
6219 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6220 i += value_bit)
6221 *vp++ = INTVAL (el) >> i;
6222 /* CONST_INTs are always logically sign-extended. */
6223 for (; i < elem_bitsize; i += value_bit)
6224 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6225 break;
6226
6227 case CONST_WIDE_INT:
6228 {
6229 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6230 unsigned char extend = wi::sign_mask (val);
6231 int prec = wi::get_precision (val);
6232
6233 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6234 *vp++ = wi::extract_uhwi (val, i, value_bit);
6235 for (; i < elem_bitsize; i += value_bit)
6236 *vp++ = extend;
6237 }
6238 break;
6239
6240 case CONST_DOUBLE:
6241 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6242 {
6243 unsigned char extend = 0;
6244 /* If this triggers, someone should have generated a
6245 CONST_INT instead. */
6246 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6247
6248 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6249 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6250 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6251 {
6252 *vp++
6253 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6254 i += value_bit;
6255 }
6256
6257 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6258 extend = -1;
6259 for (; i < elem_bitsize; i += value_bit)
6260 *vp++ = extend;
6261 }
6262 else
6263 {
6264 /* This is big enough for anything on the platform. */
6265 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6266 scalar_float_mode el_mode;
6267
6268 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6269 int bitsize = GET_MODE_BITSIZE (el_mode);
6270
6271 gcc_assert (bitsize <= elem_bitsize);
6272 gcc_assert (bitsize % value_bit == 0);
6273
6274 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6275 GET_MODE (el));
6276
6277 /* real_to_target produces its result in words affected by
6278 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6279 and use WORDS_BIG_ENDIAN instead; see the documentation
6280 of SUBREG in rtl.texi. */
6281 for (i = 0; i < bitsize; i += value_bit)
6282 {
6283 int ibase;
6284 if (WORDS_BIG_ENDIAN)
6285 ibase = bitsize - 1 - i;
6286 else
6287 ibase = i;
6288 *vp++ = tmp[ibase / 32] >> i % 32;
6289 }
6290
6291 /* It shouldn't matter what's done here, so fill it with
6292 zero. */
6293 for (; i < elem_bitsize; i += value_bit)
6294 *vp++ = 0;
6295 }
6296 break;
6297
6298 case CONST_FIXED:
6299 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6300 {
6301 for (i = 0; i < elem_bitsize; i += value_bit)
6302 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6303 }
6304 else
6305 {
6306 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6307 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6308 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6309 i += value_bit)
6310 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6311 >> (i - HOST_BITS_PER_WIDE_INT);
6312 for (; i < elem_bitsize; i += value_bit)
6313 *vp++ = 0;
6314 }
6315 break;
6316
6317 default:
6318 gcc_unreachable ();
6319 }
6320 }
6321
6322 /* Now, pick the right byte to start with. */
6323 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6324 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6325 will already have offset 0. */
6326 if (inner_bytes >= GET_MODE_SIZE (outermode))
6327 {
6328 unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6329 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6330 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6331 byte = (subword_byte % UNITS_PER_WORD
6332 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6333 }
6334
6335 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6336 so if it's become negative it will instead be very large.) */
6337 gcc_assert (byte < inner_bytes);
6338
6339 /* Convert from bytes to chunks of size value_bit. */
6340 value_start = byte * (BITS_PER_UNIT / value_bit);
6341
6342 /* Re-pack the value. */
6343 num_elem = GET_MODE_NUNITS (outermode);
6344
6345 if (VECTOR_MODE_P (outermode))
6346 {
6347 result_v = rtvec_alloc (num_elem);
6348 elems = &RTVEC_ELT (result_v, 0);
6349 }
6350 else
6351 elems = &result_s;
6352
6353 outer_submode = GET_MODE_INNER (outermode);
6354 outer_class = GET_MODE_CLASS (outer_submode);
6355 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6356
6357 gcc_assert (elem_bitsize % value_bit == 0);
6358 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6359
6360 for (elem = 0; elem < num_elem; elem++)
6361 {
6362 unsigned char *vp;
6363
6364 /* Vectors are stored in target memory order. (This is probably
6365 a mistake.) */
6366 {
6367 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6368 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6369 / BITS_PER_UNIT);
6370 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6371 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6372 unsigned bytele = (subword_byte % UNITS_PER_WORD
6373 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6374 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6375 }
6376
6377 switch (outer_class)
6378 {
6379 case MODE_INT:
6380 case MODE_PARTIAL_INT:
6381 {
6382 int u;
6383 int base = 0;
6384 int units
6385 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6386 / HOST_BITS_PER_WIDE_INT;
6387 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6388 wide_int r;
6389
6390 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6391 return NULL_RTX;
6392 for (u = 0; u < units; u++)
6393 {
6394 unsigned HOST_WIDE_INT buf = 0;
6395 for (i = 0;
6396 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6397 i += value_bit)
6398 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6399
6400 tmp[u] = buf;
6401 base += HOST_BITS_PER_WIDE_INT;
6402 }
6403 r = wide_int::from_array (tmp, units,
6404 GET_MODE_PRECISION (outer_submode));
6405 #if TARGET_SUPPORTS_WIDE_INT == 0
6406 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6407 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6408 return NULL_RTX;
6409 #endif
6410 elems[elem] = immed_wide_int_const (r, outer_submode);
6411 }
6412 break;
6413
6414 case MODE_FLOAT:
6415 case MODE_DECIMAL_FLOAT:
6416 {
6417 REAL_VALUE_TYPE r;
6418 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6419
6420 /* real_from_target wants its input in words affected by
6421 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6422 and use WORDS_BIG_ENDIAN instead; see the documentation
6423 of SUBREG in rtl.texi. */
6424 for (i = 0; i < elem_bitsize; i += value_bit)
6425 {
6426 int ibase;
6427 if (WORDS_BIG_ENDIAN)
6428 ibase = elem_bitsize - 1 - i;
6429 else
6430 ibase = i;
6431 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6432 }
6433
6434 real_from_target (&r, tmp, outer_submode);
6435 elems[elem] = const_double_from_real_value (r, outer_submode);
6436 }
6437 break;
6438
6439 case MODE_FRACT:
6440 case MODE_UFRACT:
6441 case MODE_ACCUM:
6442 case MODE_UACCUM:
6443 {
6444 FIXED_VALUE_TYPE f;
6445 f.data.low = 0;
6446 f.data.high = 0;
6447 f.mode = outer_submode;
6448
6449 for (i = 0;
6450 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6451 i += value_bit)
6452 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6453 for (; i < elem_bitsize; i += value_bit)
6454 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6455 << (i - HOST_BITS_PER_WIDE_INT));
6456
6457 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6458 }
6459 break;
6460
6461 default:
6462 gcc_unreachable ();
6463 }
6464 }
6465 if (VECTOR_MODE_P (outermode))
6466 return gen_rtx_CONST_VECTOR (outermode, result_v);
6467 else
6468 return result_s;
6469 }
6470
6471 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6472 Return 0 if no simplifications are possible. */
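
/* For instance, on a little-endian target the low word of a wider
   constant folds directly: simplify_subreg (SImode, X, DImode, 0) with
   X = (const_int 0x123456789) returns (const_int 0x23456789), byte 0
   being the least significant byte there.  */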
6473 rtx
6474 simplify_subreg (machine_mode outermode, rtx op,
6475 machine_mode innermode, poly_uint64 byte)
6476 {
6477 /* Little bit of sanity checking. */
6478 gcc_assert (innermode != VOIDmode);
6479 gcc_assert (outermode != VOIDmode);
6480 gcc_assert (innermode != BLKmode);
6481 gcc_assert (outermode != BLKmode);
6482
6483 gcc_assert (GET_MODE (op) == innermode
6484 || GET_MODE (op) == VOIDmode);
6485
6486 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6487 if (!multiple_p (byte, outersize))
6488 return NULL_RTX;
6489
6490 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6491 if (maybe_ge (byte, innersize))
6492 return NULL_RTX;
6493
6494 if (outermode == innermode && known_eq (byte, 0U))
6495 return op;
6496
6497 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6498 {
6499 rtx elt;
6500
6501 if (VECTOR_MODE_P (outermode)
6502 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6503 && vec_duplicate_p (op, &elt))
6504 return gen_vec_duplicate (outermode, elt);
6505
6506 if (outermode == GET_MODE_INNER (innermode)
6507 && vec_duplicate_p (op, &elt))
6508 return elt;
6509 }
6510
6511 if (CONST_SCALAR_INT_P (op)
6512 || CONST_DOUBLE_AS_FLOAT_P (op)
6513 || CONST_FIXED_P (op)
6514 || GET_CODE (op) == CONST_VECTOR)
6515 {
6516 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6517 the result from bytes, so it only works if the sizes of the modes
6518 and the value of the offset are known at compile time. Cases that
6519 that apply to general modes and offsets should be handled here
6520 before calling simplify_immed_subreg. */
6521 fixed_size_mode fs_outermode, fs_innermode;
6522 unsigned HOST_WIDE_INT cbyte;
6523 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6524 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6525 && byte.is_constant (&cbyte))
6526 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6527 0, GET_MODE_SIZE (fs_innermode));
6528
6529 /* Handle constant-sized outer modes and variable-sized inner modes. */
6530 unsigned HOST_WIDE_INT first_elem;
6531 if (GET_CODE (op) == CONST_VECTOR
6532 && is_a <fixed_size_mode> (outermode, &fs_outermode)
6533 && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6534 &first_elem))
6535 return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6536 first_elem,
6537 GET_MODE_SIZE (fs_outermode));
6538
6539 return NULL_RTX;
6540 }
6541
6542 /* Changing mode twice with SUBREG => just change it once,
6543 or not at all if changing back to the starting mode of OP. */
6544 if (GET_CODE (op) == SUBREG)
6545 {
6546 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6547 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6548 rtx newx;
6549
6550 if (outermode == innermostmode
6551 && known_eq (byte, 0U)
6552 && known_eq (SUBREG_BYTE (op), 0))
6553 return SUBREG_REG (op);
6554
6555 /* Work out the memory offset of the final OUTERMODE value relative
6556 to the inner value of OP. */
6557 poly_int64 mem_offset = subreg_memory_offset (outermode,
6558 innermode, byte);
6559 poly_int64 op_mem_offset = subreg_memory_offset (op);
6560 poly_int64 final_offset = mem_offset + op_mem_offset;
6561
6562 /* See whether resulting subreg will be paradoxical. */
6563 if (!paradoxical_subreg_p (outermode, innermostmode))
6564 {
6565 /* Bail out in case resulting subreg would be incorrect. */
6566 if (maybe_lt (final_offset, 0)
6567 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6568 || !multiple_p (final_offset, outersize))
6569 return NULL_RTX;
6570 }
6571 else
6572 {
6573 poly_int64 required_offset = subreg_memory_offset (outermode,
6574 innermostmode, 0);
6575 if (maybe_ne (final_offset, required_offset))
6576 return NULL_RTX;
6577 /* Paradoxical subregs always have byte offset 0. */
6578 final_offset = 0;
6579 }
6580
6581 /* Recurse for further possible simplifications. */
6582 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6583 final_offset);
6584 if (newx)
6585 return newx;
6586 if (validate_subreg (outermode, innermostmode,
6587 SUBREG_REG (op), final_offset))
6588 {
6589 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6590 if (SUBREG_PROMOTED_VAR_P (op)
6591 && SUBREG_PROMOTED_SIGN (op) >= 0
6592 && GET_MODE_CLASS (outermode) == MODE_INT
6593 && known_ge (outersize, innersize)
6594 && known_le (outersize, innermostsize)
6595 && subreg_lowpart_p (newx))
6596 {
6597 SUBREG_PROMOTED_VAR_P (newx) = 1;
6598 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6599 }
6600 return newx;
6601 }
6602 return NULL_RTX;
6603 }
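
  /* For example, on a little-endian target
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses above to
     (subreg:QI (reg:SI R) 0): the two byte offsets are combined
     relative to the innermost register.  */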
6604
6605 /* SUBREG of a hard register => just change the register number
6606 and/or mode. If the hard register is not valid in that mode,
6607 suppress this simplification. If the hard register is the stack,
6608 frame, or argument pointer, leave this as a SUBREG. */
6609
6610 if (REG_P (op) && HARD_REGISTER_P (op))
6611 {
6612 unsigned int regno, final_regno;
6613
6614 regno = REGNO (op);
6615 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6616 if (HARD_REGISTER_NUM_P (final_regno))
6617 {
6618 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6619 subreg_memory_offset (outermode,
6620 innermode, byte));
6621
6622 /* Propagate original regno. We don't have any way to specify
6623 the offset inside original regno, so do so only for lowpart.
6624 The information is used only by alias analysis, which cannot
6625 grok partial registers anyway. */
6626
6627 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6628 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6629 return x;
6630 }
6631 }
6632
6633 /* If we have a SUBREG of a register that we are replacing and we are
6634 replacing it with a MEM, make a new MEM and try replacing the
6635 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6636 or if we would be widening it. */
6637
6638 if (MEM_P (op)
6639 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6640 /* Allow splitting of volatile memory references in case we don't
6641 have an instruction to move the whole thing. */
6642 && (! MEM_VOLATILE_P (op)
6643 || ! have_insn_for (SET, innermode))
6644 && known_le (outersize, innersize))
6645 return adjust_address_nv (op, outermode, byte);
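
  /* E.g. (subreg:SI (mem:DI ADDR) 4) becomes a narrower load at an
     offset here, roughly (mem:SI (plus ADDR (const_int 4))), with the
     exact address form left to adjust_address_nv.  */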
6646
6647 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6648 of two parts. */
6649 if (GET_CODE (op) == CONCAT
6650 || GET_CODE (op) == VEC_CONCAT)
6651 {
6652 poly_uint64 final_offset;
6653 rtx part, res;
6654
6655 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6656 if (part_mode == VOIDmode)
6657 part_mode = GET_MODE_INNER (GET_MODE (op));
6658 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6659 if (known_lt (byte, part_size))
6660 {
6661 part = XEXP (op, 0);
6662 final_offset = byte;
6663 }
6664 else if (known_ge (byte, part_size))
6665 {
6666 part = XEXP (op, 1);
6667 final_offset = byte - part_size;
6668 }
6669 else
6670 return NULL_RTX;
6671
6672 if (maybe_gt (final_offset + outersize, part_size))
6673 return NULL_RTX;
6674
6675 part_mode = GET_MODE (part);
6676 if (part_mode == VOIDmode)
6677 part_mode = GET_MODE_INNER (GET_MODE (op));
6678 res = simplify_subreg (outermode, part, part_mode, final_offset);
6679 if (res)
6680 return res;
6681 if (validate_subreg (outermode, part_mode, part, final_offset))
6682 return gen_rtx_SUBREG (outermode, part, final_offset);
6683 return NULL_RTX;
6684 }
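
  /* E.g. for a complex value (concat:SC A:SF B:SF), an SFmode subreg at
     byte 4 lands entirely in the second part and simplifies to B,
     assuming 4-byte SFmode.  */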
6685
6686 /* Simplify
6687 (subreg (vec_merge (X)
6688 (vector)
6689 (const_int ((1 << N) | M)))
6690 (N * sizeof (outermode)))
6691 to
6692 (subreg (X) (N * sizeof (outermode)))
6693 */
6694 unsigned int idx;
6695 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
6696 && idx < HOST_BITS_PER_WIDE_INT
6697 && GET_CODE (op) == VEC_MERGE
6698 && GET_MODE_INNER (innermode) == outermode
6699 && CONST_INT_P (XEXP (op, 2))
6700 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
6701 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
6702
6703 /* A SUBREG resulting from a zero extension may fold to zero if
6704 it extracts higher bits than the ZERO_EXTEND's source bits. */
6705 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6706 {
6707 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6708 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6709 return CONST0_RTX (outermode);
6710 }
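
  /* On a little-endian target, (subreg:SI (zero_extend:DI X:SI) 4) is
     such a case: the subreg reads only the zero-filled upper half, so
     it folds to (const_int 0).  */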
6711
6712 scalar_int_mode int_outermode, int_innermode;
6713 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6714 && is_a <scalar_int_mode> (innermode, &int_innermode)
6715 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6716 {
6717 /* Handle polynomial integers. The upper bits of a paradoxical
6718 subreg are undefined, so this is safe regardless of whether
6719 we're truncating or extending. */
6720 if (CONST_POLY_INT_P (op))
6721 {
6722 poly_wide_int val
6723 = poly_wide_int::from (const_poly_int_value (op),
6724 GET_MODE_PRECISION (int_outermode),
6725 SIGNED);
6726 return immed_wide_int_const (val, int_outermode);
6727 }
6728
6729 if (GET_MODE_PRECISION (int_outermode)
6730 < GET_MODE_PRECISION (int_innermode))
6731 {
6732 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6733 if (tem)
6734 return tem;
6735 }
6736 }
6737
6738 /* If OP is a vector comparison and the subreg is not changing the
6739 number of elements or the size of the elements, change the result
6740 of the comparison to the new mode. */
6741 if (COMPARISON_P (op)
6742 && VECTOR_MODE_P (outermode)
6743 && VECTOR_MODE_P (innermode)
6744 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
6745 && known_eq (GET_MODE_UNIT_SIZE (outermode),
6746 GET_MODE_UNIT_SIZE (innermode)))
6747 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
6748 XEXP (op, 0), XEXP (op, 1));
6749 return NULL_RTX;
6750 }
6751
6752 /* Make a SUBREG operation or equivalent if it folds. */
6753
6754 rtx
6755 simplify_gen_subreg (machine_mode outermode, rtx op,
6756 machine_mode innermode, poly_uint64 byte)
6757 {
6758 rtx newx;
6759
6760 newx = simplify_subreg (outermode, op, innermode, byte);
6761 if (newx)
6762 return newx;
6763
6764 if (GET_CODE (op) == SUBREG
6765 || GET_CODE (op) == CONCAT
6766 || GET_MODE (op) == VOIDmode)
6767 return NULL_RTX;
6768
6769 if (validate_subreg (outermode, innermode, op, byte))
6770 return gen_rtx_SUBREG (outermode, op, byte);
6771
6772 return NULL_RTX;
6773 }
6774
6775 /* Generate a subreg to extract the least significant part of EXPR (in
6776 mode INNER_MODE) as a value of mode OUTER_MODE. */
6777
6778 rtx
6779 lowpart_subreg (machine_mode outer_mode, rtx expr,
6780 machine_mode inner_mode)
6781 {
6782 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6783 subreg_lowpart_offset (outer_mode, inner_mode));
6784 }
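
/* A typical use, assuming x is a DImode value:

     rtx lo = lowpart_subreg (SImode, x, DImode);

   This yields the SImode low part of x: byte offset 0 on little-endian
   targets and 4 on big-endian ones, as computed by
   subreg_lowpart_offset.  */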
6785
6786 /* Simplify X, an rtx expression.
6787
6788 Return the simplified expression or NULL if no simplifications
6789 were possible.
6790
6791 This is the preferred entry point into the simplification routines;
6792 however, we still allow passes to call the more specific routines.
6793
6794 Right now GCC has three (yes, three) major bodies of RTL simplification
6795 code that need to be unified.
6796
6797 1. fold_rtx in cse.c. This code uses various CSE specific
6798 information to aid in RTL simplification.
6799
6800 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6801 it uses combine specific information to aid in RTL
6802 simplification.
6803
6804 3. The routines in this file.
6805
6806
6807 Long term we want to only have one body of simplification code; to
6808 get to that state I recommend the following steps:
6809
6810 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6811 which do not rely on pass-dependent state into these routines.
6812
6813 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6814 use this routine whenever possible.
6815
6816 3. Allow for pass dependent state to be provided to these
6817 routines and add simplifications based on the pass dependent
6818 state. Remove code from cse.c & combine.c that becomes
6819 redundant/dead.
6820
6821 It will take time, but ultimately the compiler will be easier to
6822 maintain and improve. It's totally silly that when we add a
6823 simplification it needs to be added to 4 places (3 for RTL
6824 simplification and 1 for tree simplification). */
6825
6826 rtx
6827 simplify_rtx (const_rtx x)
6828 {
6829 const enum rtx_code code = GET_CODE (x);
6830 const machine_mode mode = GET_MODE (x);
6831
6832 switch (GET_RTX_CLASS (code))
6833 {
6834 case RTX_UNARY:
6835 return simplify_unary_operation (code, mode,
6836 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6837 case RTX_COMM_ARITH:
6838 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6839 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6840
6841 /* Fall through. */
6842
6843 case RTX_BIN_ARITH:
6844 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6845
6846 case RTX_TERNARY:
6847 case RTX_BITFIELD_OPS:
6848 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6849 XEXP (x, 0), XEXP (x, 1),
6850 XEXP (x, 2));
6851
6852 case RTX_COMPARE:
6853 case RTX_COMM_COMPARE:
6854 return simplify_relational_operation (code, mode,
6855 ((GET_MODE (XEXP (x, 0))
6856 != VOIDmode)
6857 ? GET_MODE (XEXP (x, 0))
6858 : GET_MODE (XEXP (x, 1))),
6859 XEXP (x, 0),
6860 XEXP (x, 1));
6861
6862 case RTX_EXTRA:
6863 if (code == SUBREG)
6864 return simplify_subreg (mode, SUBREG_REG (x),
6865 GET_MODE (SUBREG_REG (x)),
6866 SUBREG_BYTE (x));
6867 break;
6868
6869 case RTX_OBJ:
6870 if (code == LO_SUM)
6871 {
6872 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6873 if (GET_CODE (XEXP (x, 0)) == HIGH
6874 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6875 return XEXP (x, 1);
6876 }
6877 break;
6878
6879 default:
6880 break;
6881 }
6882 return NULL;
6883 }
6884
6885 #if CHECKING_P
6886
6887 namespace selftest {
6888
6889 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6890
6891 static rtx
6892 make_test_reg (machine_mode mode)
6893 {
6894 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6895
6896 return gen_rtx_REG (mode, test_reg_num++);
6897 }
6898
6899 /* Test vector simplifications involving VEC_DUPLICATE in which the
6900 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6901 register that holds one element of MODE. */
6902
6903 static void
6904 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6905 {
6906 scalar_mode inner_mode = GET_MODE_INNER (mode);
6907 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6908 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6909 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6910 {
6911 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6912 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6913 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6914 ASSERT_RTX_EQ (duplicate,
6915 simplify_unary_operation (NOT, mode,
6916 duplicate_not, mode));
6917
6918 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6919 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6920 ASSERT_RTX_EQ (duplicate,
6921 simplify_unary_operation (NEG, mode,
6922 duplicate_neg, mode));
6923
6924 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6925 ASSERT_RTX_EQ (duplicate,
6926 simplify_binary_operation (PLUS, mode, duplicate,
6927 CONST0_RTX (mode)));
6928
6929 ASSERT_RTX_EQ (duplicate,
6930 simplify_binary_operation (MINUS, mode, duplicate,
6931 CONST0_RTX (mode)));
6932
6933 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6934 simplify_binary_operation (MINUS, mode, duplicate,
6935 duplicate));
6936 }
6937
6938 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6939 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6940 ASSERT_RTX_PTR_EQ (scalar_reg,
6941 simplify_binary_operation (VEC_SELECT, inner_mode,
6942 duplicate, zero_par));
6943
6944 unsigned HOST_WIDE_INT const_nunits;
6945 if (nunits.is_constant (&const_nunits))
6946 {
6947 /* And again with the final element. */
6948 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6949 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6950 ASSERT_RTX_PTR_EQ (scalar_reg,
6951 simplify_binary_operation (VEC_SELECT, inner_mode,
6952 duplicate, last_par));
6953
6954 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
6955 rtx vector_reg = make_test_reg (mode);
6956 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
6957 {
6958 if (i >= HOST_BITS_PER_WIDE_INT)
6959 break;
6960 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
6961 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
6962 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
6963 ASSERT_RTX_EQ (scalar_reg,
6964 simplify_gen_subreg (inner_mode, vm,
6965 mode, offset));
6966 }
6967 }
6968
6969 /* Test a scalar subreg of a VEC_DUPLICATE. */
6970 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6971 ASSERT_RTX_EQ (scalar_reg,
6972 simplify_gen_subreg (inner_mode, duplicate,
6973 mode, offset));
6974
6975 machine_mode narrower_mode;
6976 if (maybe_ne (nunits, 2U)
6977 && multiple_p (nunits, 2)
6978 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6979 && VECTOR_MODE_P (narrower_mode))
6980 {
6981 /* Test VEC_DUPLICATE of a vector. */
6982 rtx_vector_builder nbuilder (narrower_mode, 2, 1);
6983 nbuilder.quick_push (const0_rtx);
6984 nbuilder.quick_push (const1_rtx);
6985 rtx_vector_builder builder (mode, 2, 1);
6986 builder.quick_push (const0_rtx);
6987 builder.quick_push (const1_rtx);
6988 ASSERT_RTX_EQ (builder.build (),
6989 simplify_unary_operation (VEC_DUPLICATE, mode,
6990 nbuilder.build (),
6991 narrower_mode));
6992
6993 /* Test VEC_SELECT of a vector. */
6994 rtx vec_par
6995 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6996 rtx narrower_duplicate
6997 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6998 ASSERT_RTX_EQ (narrower_duplicate,
6999 simplify_binary_operation (VEC_SELECT, narrower_mode,
7000 duplicate, vec_par));
7001
7002 /* Test a vector subreg of a VEC_DUPLICATE. */
7003 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
7004 ASSERT_RTX_EQ (narrower_duplicate,
7005 simplify_gen_subreg (narrower_mode, duplicate,
7006 mode, offset));
7007 }
7008 }
7009
7010 /* Test vector simplifications involving VEC_SERIES in which the
7011 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7012 register that holds one element of MODE. */
7013
7014 static void
7015 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7016 {
7017 /* Test unary cases with VEC_SERIES arguments. */
7018 scalar_mode inner_mode = GET_MODE_INNER (mode);
7019 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7020 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7021 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7022 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
7023 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
7024 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
7025 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
7026 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
7027 neg_scalar_reg);
7028 ASSERT_RTX_EQ (series_0_r,
7029 simplify_unary_operation (NEG, mode, series_0_nr, mode));
7030 ASSERT_RTX_EQ (series_r_m1,
7031 simplify_unary_operation (NEG, mode, series_nr_1, mode));
7032 ASSERT_RTX_EQ (series_r_r,
7033 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
7034
7035 /* Test that a VEC_SERIES with a zero step is simplified away. */
7036 ASSERT_RTX_EQ (duplicate,
7037 simplify_binary_operation (VEC_SERIES, mode,
7038 scalar_reg, const0_rtx));
7039
7040 /* Test PLUS and MINUS with VEC_SERIES. */
7041 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
7042 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
7043 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
7044 ASSERT_RTX_EQ (series_r_r,
7045 simplify_binary_operation (PLUS, mode, series_0_r,
7046 duplicate));
7047 ASSERT_RTX_EQ (series_r_1,
7048 simplify_binary_operation (PLUS, mode, duplicate,
7049 series_0_1));
7050 ASSERT_RTX_EQ (series_r_m1,
7051 simplify_binary_operation (PLUS, mode, duplicate,
7052 series_0_m1));
7053 ASSERT_RTX_EQ (series_0_r,
7054 simplify_binary_operation (MINUS, mode, series_r_r,
7055 duplicate));
7056 ASSERT_RTX_EQ (series_r_m1,
7057 simplify_binary_operation (MINUS, mode, duplicate,
7058 series_0_1));
7059 ASSERT_RTX_EQ (series_r_1,
7060 simplify_binary_operation (MINUS, mode, duplicate,
7061 series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_unary_operation (NEG, mode, series_0_m1, mode));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (PLUS, mode, series_0_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
		 simplify_binary_operation (PLUS, mode, series_1_1,
					    series_0_2));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_binary_operation (MINUS, mode, series_1_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, series_1_3,
					    series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
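  /* The element-wise product of two series is quadratic in the element
     index, so it is not itself a series.  A fixed-length vector could
     still be folded element by element to a CONST_VECTOR, but for
     variable-length vectors the simplification must fail.  */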
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
					     series_0_1));

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (ASHIFT, mode, series_0_1,
					    CONST1_RTX (mode)));
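  /* Likewise, shifting a vector of 1s by the series { 0, 1, 2, ... }
     yields successive powers of 2, which no VEC_SERIES or
     VEC_DUPLICATE can represent, so the variable-length case must
     fail to simplify.  */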
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
					     series_0_1));
}

/* Verify simplify_merge_mask works correctly.  */
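/* Roughly: simplify_merge_mask (X, MASK, OP) assumes that X occurs as
   operand OP of a VEC_MERGE with mask MASK, so only the lanes that the
   outer merge takes from operand OP matter.  Any inner
   (vec_merge A B MASK) within X can then be replaced by A (OP == 0) or
   B (OP == 1); NULL_RTX means no simplification was possible.  */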

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
		 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
		 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
		 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
		 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
		 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
		 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects: an operand with a side effect (here a PRE_INC) may
     be returned when it is the lane source actually selected, but the
     merge must not be simplified in a way that would discard it.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly through simplify_rtx: the outer merge takes its
     first-operand lanes from vm1 (yielding op0) and its second-operand
     lanes from vm2 (yielding op3).  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
		 simplify_rtx (nvm));
}

/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
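	  /* Series tests are limited to integer vectors with more than
	     two elements, presumably because any two values form a
	     (degenerate) two-element series.  */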
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    test_vector_ops_series (mode, scalar_reg);
	  test_vec_merge (mode);
	}
    }
}

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

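/* Specialisation for targets with a single poly_int coefficient: the
   two-coefficient poly_int64 values used below cannot be constructed
   there (and would in any case fold to plain CONST_INTs), so there is
   nothing to run.  */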
template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller
     arithmetic testing is done directly on poly_ints.  */
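  /* Reading poly_int64 (A, B) as the value A + B * x for a nonnegative
     runtime parameter x: NEG maps 30 + 24x to -30 - 24x (x9), NOT maps
     it to -31 - 24x since ~v == -v - 1 (x10), and the coefficients
     wrap modulo the mode, so the QImode sum of x1 and x2 is (-79, 128),
     i.e. (-79, -128) (x3).  */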
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}

/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */