PR c++/80891 (#1)
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36
37 /* Simplification and canonicalization of RTL. */
38
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
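/* Illustrative example (assuming a 64-bit HOST_WIDE_INT):
     HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63)  is HOST_WIDE_INT_M1 (all ones),
     HWI_SIGN_EXTEND (5)                       is HOST_WIDE_INT_0.  */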
45
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
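/* For instance, neg_const_int (SImode, (const_int 1)) yields (const_int -1).
   The UINTVAL check above catches inputs such as a CONST_INT with only the
   topmost HOST_WIDE_INT bit set in a mode wider than HOST_WIDE_INT, whose
   true negation does not fit in a single HOST_WIDE_INT and must go through
   the wide-int NEG path instead.  */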
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 #if TARGET_SUPPORTS_WIDE_INT
92 else if (CONST_WIDE_INT_P (x))
93 {
94 unsigned int i;
95 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
96 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
97 return false;
98 for (i = 0; i < elts - 1; i++)
99 if (CONST_WIDE_INT_ELT (x, i) != 0)
100 return false;
101 val = CONST_WIDE_INT_ELT (x, elts - 1);
102 width %= HOST_BITS_PER_WIDE_INT;
103 if (width == 0)
104 width = HOST_BITS_PER_WIDE_INT;
105 }
106 #else
107 else if (width <= HOST_BITS_PER_DOUBLE_INT
108 && CONST_DOUBLE_AS_INT_P (x)
109 && CONST_DOUBLE_LOW (x) == 0)
110 {
111 val = CONST_DOUBLE_HIGH (x);
112 width -= HOST_BITS_PER_WIDE_INT;
113 }
114 #endif
115 else
116 /* X is not an integer constant. */
117 return false;
118
119 if (width < HOST_BITS_PER_WIDE_INT)
120 val &= (HOST_WIDE_INT_1U << width) - 1;
121 return val == (HOST_WIDE_INT_1U << (width - 1));
122 }
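/* For example, in SImode this returns true only for the CONST_INT whose
   masked value is 0x80000000, i.e. (const_int -2147483648); any other
   constant, or a non-integer constant, yields false.  */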
123
124 /* Test whether VAL is equal to the most significant bit of mode MODE
125 (after masking with the mode mask of MODE). Returns false if the
126 precision of MODE is too large to handle. */
127
128 bool
129 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
130 {
131 unsigned int width;
132
133 if (GET_MODE_CLASS (mode) != MODE_INT)
134 return false;
135
136 width = GET_MODE_PRECISION (mode);
137 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138 return false;
139
140 val &= GET_MODE_MASK (mode);
141 return val == (HOST_WIDE_INT_1U << (width - 1));
142 }
143
144 /* Test whether the most significant bit of mode MODE is set in VAL.
145 Returns false if the precision of MODE is too large to handle. */
146 bool
147 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
148 {
149 unsigned int width;
150
151 if (GET_MODE_CLASS (mode) != MODE_INT)
152 return false;
153
154 width = GET_MODE_PRECISION (mode);
155 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156 return false;
157
158 val &= HOST_WIDE_INT_1U << (width - 1);
159 return val != 0;
160 }
161
162 /* Test whether the most significant bit of mode MODE is clear in VAL.
163 Returns false if the precision of MODE is too large to handle. */
164 bool
165 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
166 {
167 unsigned int width;
168
169 if (GET_MODE_CLASS (mode) != MODE_INT)
170 return false;
171
172 width = GET_MODE_PRECISION (mode);
173 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
174 return false;
175
176 val &= HOST_WIDE_INT_1U << (width - 1);
177 return val == 0;
178 }
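/* For example, in QImode: val_signbit_p is true only for a VAL that masks
   to 0x80, val_signbit_known_set_p (QImode, 0x90) is true, and
   val_signbit_known_clear_p (QImode, 0x70) is true.  */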
179 \f
180 /* Make a binary operation by properly ordering the operands and
181 seeing if the expression folds. */
182
183 rtx
184 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
185 rtx op1)
186 {
187 rtx tem;
188
189 /* If this simplifies, do it. */
190 tem = simplify_binary_operation (code, mode, op0, op1);
191 if (tem)
192 return tem;
193
194 /* Put complex operands first and constants second if commutative. */
195 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
196 && swap_commutative_operands_p (op0, op1))
197 std::swap (op0, op1);
198
199 return gen_rtx_fmt_ee (code, mode, op0, op1);
200 }
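/* For example, simplify_gen_binary (PLUS, SImode, reg, const0_rtx) folds to
   REG, while simplify_gen_binary (PLUS, SImode, const1_rtx, reg) does not
   fold and is returned in canonical order as (plus:SI reg (const_int 1)).  */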
201 \f
202 /* If X is a MEM referencing the constant pool, return the real value.
203 Otherwise return X. */
204 rtx
205 avoid_constant_pool_reference (rtx x)
206 {
207 rtx c, tmp, addr;
208 machine_mode cmode;
209 HOST_WIDE_INT offset = 0;
210
211 switch (GET_CODE (x))
212 {
213 case MEM:
214 break;
215
216 case FLOAT_EXTEND:
217 /* Handle float extensions of constant pool references. */
218 tmp = XEXP (x, 0);
219 c = avoid_constant_pool_reference (tmp);
220 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
222 GET_MODE (x));
223 return x;
224
225 default:
226 return x;
227 }
228
229 if (GET_MODE (x) == BLKmode)
230 return x;
231
232 addr = XEXP (x, 0);
233
234 /* Call target hook to avoid the effects of -fpic etc.... */
235 addr = targetm.delegitimize_address (addr);
236
237 /* Split the address into a base and integer offset. */
238 if (GET_CODE (addr) == CONST
239 && GET_CODE (XEXP (addr, 0)) == PLUS
240 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
241 {
242 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
243 addr = XEXP (XEXP (addr, 0), 0);
244 }
245
246 if (GET_CODE (addr) == LO_SUM)
247 addr = XEXP (addr, 1);
248
249 /* If this is a constant pool reference, we can turn it into its
250 constant and hope that simplifications happen. */
251 if (GET_CODE (addr) == SYMBOL_REF
252 && CONSTANT_POOL_ADDRESS_P (addr))
253 {
254 c = get_pool_constant (addr);
255 cmode = get_pool_mode (addr);
256
257 /* If we're accessing the constant in a different mode than it was
258 originally stored, attempt to fix that up via subreg simplifications.
259 If that fails we have no choice but to return the original memory. */
260 if (offset == 0 && cmode == GET_MODE (x))
261 return c;
262 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
263 {
264 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
265 if (tem && CONSTANT_P (tem))
266 return tem;
267 }
268 }
269
270 return x;
271 }
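/* For instance, if X is (mem:SI (symbol_ref C)) where C is a constant pool
   entry created for the SImode value 42, this returns (const_int 42); if the
   access uses a different mode or a nonzero offset, a subreg of the pool
   constant is tried instead.  */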
272 \f
273 /* Simplify a MEM based on its attributes. This is the default
274 delegitimize_address target hook, and it's recommended that every
275 overrider call it. */
276
277 rtx
278 delegitimize_mem_from_attrs (rtx x)
279 {
280 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
281 use their base addresses as equivalent. */
282 if (MEM_P (x)
283 && MEM_EXPR (x)
284 && MEM_OFFSET_KNOWN_P (x))
285 {
286 tree decl = MEM_EXPR (x);
287 machine_mode mode = GET_MODE (x);
288 HOST_WIDE_INT offset = 0;
289
290 switch (TREE_CODE (decl))
291 {
292 default:
293 decl = NULL;
294 break;
295
296 case VAR_DECL:
297 break;
298
299 case ARRAY_REF:
300 case ARRAY_RANGE_REF:
301 case COMPONENT_REF:
302 case BIT_FIELD_REF:
303 case REALPART_EXPR:
304 case IMAGPART_EXPR:
305 case VIEW_CONVERT_EXPR:
306 {
307 HOST_WIDE_INT bitsize, bitpos;
308 tree toffset;
309 int unsignedp, reversep, volatilep = 0;
310
311 decl
312 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
313 &unsignedp, &reversep, &volatilep);
314 if (bitsize != GET_MODE_BITSIZE (mode)
315 || (bitpos % BITS_PER_UNIT)
316 || (toffset && !tree_fits_shwi_p (toffset)))
317 decl = NULL;
318 else
319 {
320 offset += bitpos / BITS_PER_UNIT;
321 if (toffset)
322 offset += tree_to_shwi (toffset);
323 }
324 break;
325 }
326 }
327
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
335 {
336 rtx newx;
337
338 offset += MEM_OFFSET (x);
339
340 newx = DECL_RTL (decl);
341
342 if (MEM_P (newx))
343 {
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345
346 /* Avoid creating a new MEM needlessly if we already had
347 the same address. We do so if there is no OFFSET and the
348 old address X is identical to NEWX, or if X is of the
349 form (plus NEWX OFFSET), or if NEWX is of the form
350 (plus Y (const_int Z)) and X is that with the offset
351 added: (plus Y (const_int Z+OFFSET)). */
352 if (!((offset == 0
353 || (GET_CODE (o) == PLUS
354 && GET_CODE (XEXP (o, 1)) == CONST_INT
355 && (offset == INTVAL (XEXP (o, 1))
356 || (GET_CODE (n) == PLUS
357 && GET_CODE (XEXP (n, 1)) == CONST_INT
358 && (INTVAL (XEXP (n, 1)) + offset
359 == INTVAL (XEXP (o, 1)))
360 && (n = XEXP (n, 0))))
361 && (o = XEXP (o, 0))))
362 && rtx_equal_p (o, n)))
363 x = adjust_address_nv (newx, mode, offset);
364 }
365 else if (GET_MODE (x) == GET_MODE (newx)
366 && offset == 0)
367 x = newx;
368 }
369 }
370
371 return x;
372 }
373 \f
374 /* Make a unary operation by first seeing if it folds and otherwise making
375 the specified operation. */
376
377 rtx
378 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
379 machine_mode op_mode)
380 {
381 rtx tem;
382
383 /* If this simplifies, use it. */
384 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
385 return tem;
386
387 return gen_rtx_fmt_e (code, mode, op);
388 }
389
390 /* Likewise for ternary operations. */
391
392 rtx
393 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
394 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
395 {
396 rtx tem;
397
398 /* If this simplifies, use it. */
399 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
400 op0, op1, op2)))
401 return tem;
402
403 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
404 }
405
406 /* Likewise, for relational operations.
407 CMP_MODE specifies mode comparison is done in. */
408
409 rtx
410 simplify_gen_relational (enum rtx_code code, machine_mode mode,
411 machine_mode cmp_mode, rtx op0, rtx op1)
412 {
413 rtx tem;
414
415 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
416 op0, op1)))
417 return tem;
418
419 return gen_rtx_fmt_ee (code, mode, op0, op1);
420 }
421 \f
422 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
423 and simplify the result. If FN is non-NULL, call this callback on each
424 X, if it returns non-NULL, replace X with its return value and simplify the
425 result. */
426
427 rtx
428 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
429 rtx (*fn) (rtx, const_rtx, void *), void *data)
430 {
431 enum rtx_code code = GET_CODE (x);
432 machine_mode mode = GET_MODE (x);
433 machine_mode op_mode;
434 const char *fmt;
435 rtx op0, op1, op2, newx, op;
436 rtvec vec, newvec;
437 int i, j;
438
439 if (__builtin_expect (fn != NULL, 0))
440 {
441 newx = fn (x, old_rtx, data);
442 if (newx)
443 return newx;
444 }
445 else if (rtx_equal_p (x, old_rtx))
446 return copy_rtx ((rtx) data);
447
448 switch (GET_RTX_CLASS (code))
449 {
450 case RTX_UNARY:
451 op0 = XEXP (x, 0);
452 op_mode = GET_MODE (op0);
453 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
454 if (op0 == XEXP (x, 0))
455 return x;
456 return simplify_gen_unary (code, mode, op0, op_mode);
457
458 case RTX_BIN_ARITH:
459 case RTX_COMM_ARITH:
460 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_binary (code, mode, op0, op1);
465
466 case RTX_COMPARE:
467 case RTX_COMM_COMPARE:
468 op0 = XEXP (x, 0);
469 op1 = XEXP (x, 1);
470 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
471 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_relational (code, mode, op_mode, op0, op1);
476
477 case RTX_TERNARY:
478 case RTX_BITFIELD_OPS:
479 op0 = XEXP (x, 0);
480 op_mode = GET_MODE (op0);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
483 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
485 return x;
486 if (op_mode == VOIDmode)
487 op_mode = GET_MODE (op0);
488 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
489
490 case RTX_EXTRA:
491 if (code == SUBREG)
492 {
493 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
494 if (op0 == SUBREG_REG (x))
495 return x;
496 op0 = simplify_gen_subreg (GET_MODE (x), op0,
497 GET_MODE (SUBREG_REG (x)),
498 SUBREG_BYTE (x));
499 return op0 ? op0 : x;
500 }
501 break;
502
503 case RTX_OBJ:
504 if (code == MEM)
505 {
506 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
507 if (op0 == XEXP (x, 0))
508 return x;
509 return replace_equiv_address_nv (x, op0);
510 }
511 else if (code == LO_SUM)
512 {
513 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
514 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
515
516 /* (lo_sum (high x) y) -> y where x and y have the same base. */
517 if (GET_CODE (op0) == HIGH)
518 {
519 rtx base0, base1, offset0, offset1;
520 split_const (XEXP (op0, 0), &base0, &offset0);
521 split_const (op1, &base1, &offset1);
522 if (rtx_equal_p (base0, base1))
523 return op1;
524 }
525
526 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
527 return x;
528 return gen_rtx_LO_SUM (mode, op0, op1);
529 }
530 break;
531
532 default:
533 break;
534 }
535
536 newx = x;
537 fmt = GET_RTX_FORMAT (code);
538 for (i = 0; fmt[i]; i++)
539 switch (fmt[i])
540 {
541 case 'E':
542 vec = XVEC (x, i);
543 newvec = XVEC (newx, i);
544 for (j = 0; j < GET_NUM_ELEM (vec); j++)
545 {
546 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
547 old_rtx, fn, data);
548 if (op != RTVEC_ELT (vec, j))
549 {
550 if (newvec == vec)
551 {
552 newvec = shallow_copy_rtvec (vec);
553 if (x == newx)
554 newx = shallow_copy_rtx (x);
555 XVEC (newx, i) = newvec;
556 }
557 RTVEC_ELT (newvec, j) = op;
558 }
559 }
560 break;
561
562 case 'e':
563 if (XEXP (x, i))
564 {
565 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
566 if (op != XEXP (x, i))
567 {
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XEXP (newx, i) = op;
571 }
572 }
573 break;
574 }
575 return newx;
576 }
577
578 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
579 resulting RTX. Return a new RTX which is as simplified as possible. */
580
581 rtx
582 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
583 {
584 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
585 }
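/* For example, simplify_replace_rtx applied to
   (plus:SI (reg:SI 1) (const_int 4)) with OLD_RTX = (reg:SI 1) and
   NEW_RTX = (const_int 8) returns (const_int 12).  */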
586 \f
587 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
588 Only handle cases where the truncated value is inherently an rvalue.
589
590 RTL provides two ways of truncating a value:
591
592 1. a lowpart subreg. This form is only a truncation when both
593 the outer and inner modes (here MODE and OP_MODE respectively)
594 are scalar integers, and only then when the subreg is used as
595 an rvalue.
596
597 It is only valid to form such truncating subregs if the
598 truncation requires no action by the target. The onus for
599 proving this is on the creator of the subreg -- e.g. the
600 caller to simplify_subreg or simplify_gen_subreg -- and typically
601 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
602
603 2. a TRUNCATE. This form handles both scalar and compound integers.
604
605 The first form is preferred where valid. However, the TRUNCATE
606 handling in simplify_unary_operation turns the second form into the
607 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
608 so it is generally safe to form rvalue truncations using:
609
610 simplify_gen_unary (TRUNCATE, ...)
611
612 and leave simplify_unary_operation to work out which representation
613 should be used.
614
615 Because of the proof requirements on (1), simplify_truncation must
616 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
617 regardless of whether the outer truncation came from a SUBREG or a
618 TRUNCATE. For example, if the caller has proven that an SImode
619 truncation of:
620
621 (and:DI X Y)
622
623 is a no-op and can be represented as a subreg, it does not follow
624 that SImode truncations of X and Y are also no-ops. On a target
625 like 64-bit MIPS that requires SImode values to be stored in
626 sign-extended form, an SImode truncation of:
627
628 (and:DI (reg:DI X) (const_int 63))
629
630 is trivially a no-op because only the lower 6 bits can be set.
631 However, X is still an arbitrary 64-bit number and so we cannot
632 assume that truncating it too is a no-op. */
633
634 static rtx
635 simplify_truncation (machine_mode mode, rtx op,
636 machine_mode op_mode)
637 {
638 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
639 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
640 gcc_assert (precision <= op_precision);
641
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
645 {
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
661 }
662
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if (1
667 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
671 {
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
674 {
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
678 }
679 }
680
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
682 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697
698 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
699 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709
710 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
711 (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
730 {
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
736
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
744 {
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
747 }
748 }
749
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
758 {
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
763 {
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
766 {
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
770 }
771 }
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
773 {
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
778 }
779 }
780
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
791 {
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
798 }
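/* For example, on a 32-bit little-endian target this turns
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) into
   (subreg:SI (reg:DI X) 4), i.e. a direct reference to the high word.  */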
799
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && SCALAR_INT_MODE_P (op_mode)
806 && MEM_P (XEXP (op, 0))
807 && CONST_INT_P (XEXP (op, 1))
808 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
809 && INTVAL (XEXP (op, 1)) > 0
810 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
811 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
812 MEM_ADDR_SPACE (XEXP (op, 0)))
813 && ! MEM_VOLATILE_P (XEXP (op, 0))
814 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
815 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
816 {
817 int byte = subreg_lowpart_offset (mode, op_mode);
818 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
819 return adjust_address_nv (XEXP (op, 0), mode,
820 (WORDS_BIG_ENDIAN
821 ? byte - shifted_bytes
822 : byte + shifted_bytes));
823 }
824
825 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
826 (OP:SI foo:SI) if OP is NEG or ABS. */
827 if ((GET_CODE (op) == ABS
828 || GET_CODE (op) == NEG)
829 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
830 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
831 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
832 return simplify_gen_unary (GET_CODE (op), mode,
833 XEXP (XEXP (op, 0), 0), mode);
834
835 /* (truncate:A (subreg:B (truncate:C X) 0)) is
836 (truncate:A X). */
837 if (GET_CODE (op) == SUBREG
838 && SCALAR_INT_MODE_P (mode)
839 && SCALAR_INT_MODE_P (op_mode)
840 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
841 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
842 && subreg_lowpart_p (op))
843 {
844 rtx inner = XEXP (SUBREG_REG (op), 0);
845 if (GET_MODE_PRECISION (mode)
846 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
847 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
848 else
849 /* If subreg above is paradoxical and C is narrower
850 than A, return (subreg:A (truncate:C X) 0). */
851 return simplify_gen_subreg (mode, SUBREG_REG (op),
852 GET_MODE (SUBREG_REG (op)), 0);
853 }
854
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
859
860 return NULL_RTX;
861 }
862 \f
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
866 rtx
867 simplify_unary_operation (enum rtx_code code, machine_mode mode,
868 rtx op, machine_mode op_mode)
869 {
870 rtx trueop, tem;
871
872 trueop = avoid_constant_pool_reference (op);
873
874 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
875 if (tem)
876 return tem;
877
878 return simplify_unary_operation_1 (code, mode, op);
879 }
880
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
882 to be exact. */
883
884 static bool
885 exact_int_to_float_conversion_p (const_rtx op)
886 {
887 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
888 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode != VOIDmode);
891 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
892 int in_bits = in_prec;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode))
894 {
895 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
896 if (GET_CODE (op) == FLOAT)
897 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
898 else if (GET_CODE (op) == UNSIGNED_FLOAT)
899 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
900 else
901 gcc_unreachable ();
902 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
903 }
904 return in_bits <= out_bits;
905 }
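/* For example, (float:DF (reg:SI X)) is always exact, since DFmode's 53-bit
   significand can represent every 32-bit integer; whether
   (float:SF (reg:DI X)) is exact depends on what nonzero_bits and
   num_sign_bit_copies can prove about X.  */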
906
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
911 {
912 enum rtx_code reversed;
913 rtx temp;
914
915 switch (code)
916 {
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
921
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
929
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
934
935 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
936 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
937 and MODE_VECTOR_INT. */
938 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
939 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
940 CONSTM1_RTX (mode));
941
942 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
943 if (GET_CODE (op) == XOR
944 && CONST_INT_P (XEXP (op, 1))
945 && (temp = simplify_unary_operation (NOT, mode,
946 XEXP (op, 1), mode)) != 0)
947 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
948
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op) == PLUS
951 && CONST_INT_P (XEXP (op, 1))
952 && mode_signbit_p (mode, XEXP (op, 1))
953 && (temp = simplify_unary_operation (NOT, mode,
954 XEXP (op, 1), mode)) != 0)
955 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
956
957
958 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
959 operands other than 1, but that is not valid. We could do a
960 similar simplification for (not (lshiftrt C X)) where C is
961 just the sign bit, but this doesn't seem common enough to
962 bother with. */
963 if (GET_CODE (op) == ASHIFT
964 && XEXP (op, 0) == const1_rtx)
965 {
966 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
967 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
968 }
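/* E.g. (not:SI (ashift:SI (const_int 1) (reg:SI X))) becomes
   (rotate:SI (const_int -2) (reg:SI X)), since ~1 is -2.  */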
969
970 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
971 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
972 so we can perform the above simplification. */
973 if (STORE_FLAG_VALUE == -1
974 && GET_CODE (op) == ASHIFTRT
975 && CONST_INT_P (XEXP (op, 1))
976 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
977 return simplify_gen_relational (GE, mode, VOIDmode,
978 XEXP (op, 0), const0_rtx);
979
980
981 if (GET_CODE (op) == SUBREG
982 && subreg_lowpart_p (op)
983 && (GET_MODE_SIZE (GET_MODE (op))
984 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
985 && GET_CODE (SUBREG_REG (op)) == ASHIFT
986 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
987 {
988 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
989 rtx x;
990
991 x = gen_rtx_ROTATE (inner_mode,
992 simplify_gen_unary (NOT, inner_mode, const1_rtx,
993 inner_mode),
994 XEXP (SUBREG_REG (op), 1));
995 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
996 if (temp)
997 return temp;
998 }
999
1000 /* Apply De Morgan's laws to reduce number of patterns for machines
1001 with negating logical insns (and-not, nand, etc.). If result has
1002 only one NOT, put it first, since that is how the patterns are
1003 coded. */
1004 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1005 {
1006 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1007 machine_mode op_mode;
1008
1009 op_mode = GET_MODE (in1);
1010 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1011
1012 op_mode = GET_MODE (in2);
1013 if (op_mode == VOIDmode)
1014 op_mode = mode;
1015 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1016
1017 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1018 std::swap (in1, in2);
1019
1020 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1021 mode, in1, in2);
1022 }
1023
1024 /* (not (bswap x)) -> (bswap (not x)). */
1025 if (GET_CODE (op) == BSWAP)
1026 {
1027 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1028 return simplify_gen_unary (BSWAP, mode, x, mode);
1029 }
1030 break;
1031
1032 case NEG:
1033 /* (neg (neg X)) == X. */
1034 if (GET_CODE (op) == NEG)
1035 return XEXP (op, 0);
1036
1037 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1038 If comparison is not reversible use
1039 x ? y : (neg y). */
1040 if (GET_CODE (op) == IF_THEN_ELSE)
1041 {
1042 rtx cond = XEXP (op, 0);
1043 rtx true_rtx = XEXP (op, 1);
1044 rtx false_rtx = XEXP (op, 2);
1045
1046 if ((GET_CODE (true_rtx) == NEG
1047 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1048 || (GET_CODE (false_rtx) == NEG
1049 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1050 {
1051 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1052 temp = reversed_comparison (cond, mode);
1053 else
1054 {
1055 temp = cond;
1056 std::swap (true_rtx, false_rtx);
1057 }
1058 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1059 mode, temp, true_rtx, false_rtx);
1060 }
1061 }
1062
1063 /* (neg (plus X 1)) can become (not X). */
1064 if (GET_CODE (op) == PLUS
1065 && XEXP (op, 1) == const1_rtx)
1066 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1067
1068 /* Similarly, (neg (not X)) is (plus X 1). */
1069 if (GET_CODE (op) == NOT)
1070 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1071 CONST1_RTX (mode));
1072
1073 /* (neg (minus X Y)) can become (minus Y X). This transformation
1074 isn't safe for modes with signed zeros, since if X and Y are
1075 both +0, (minus Y X) is the same as (minus X Y). If the
1076 rounding mode is towards +infinity (or -infinity) then the two
1077 expressions will be rounded differently. */
1078 if (GET_CODE (op) == MINUS
1079 && !HONOR_SIGNED_ZEROS (mode)
1080 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1081 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1082
1083 if (GET_CODE (op) == PLUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 {
1087 /* (neg (plus A C)) is simplified to (minus -C A). */
1088 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1089 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1090 {
1091 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1092 if (temp)
1093 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1094 }
1095
1096 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1097 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1099 }
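/* E.g. (neg:SI (plus:SI (reg:SI A) (const_int 7))) becomes
   (minus:SI (const_int -7) (reg:SI A)).  */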
1100
1101 /* (neg (mult A B)) becomes (mult A (neg B)).
1102 This works even for floating-point values. */
1103 if (GET_CODE (op) == MULT
1104 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1105 {
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1107 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1108 }
1109
1110 /* NEG commutes with ASHIFT since it is multiplication. Only do
1111 this if we can then eliminate the NEG (e.g., if the operand
1112 is a constant). */
1113 if (GET_CODE (op) == ASHIFT)
1114 {
1115 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1116 if (temp)
1117 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1118 }
1119
1120 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1121 C is equal to the width of MODE minus 1. */
1122 if (GET_CODE (op) == ASHIFTRT
1123 && CONST_INT_P (XEXP (op, 1))
1124 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1125 return simplify_gen_binary (LSHIFTRT, mode,
1126 XEXP (op, 0), XEXP (op, 1));
1127
1128 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1129 C is equal to the width of MODE minus 1. */
1130 if (GET_CODE (op) == LSHIFTRT
1131 && CONST_INT_P (XEXP (op, 1))
1132 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1133 return simplify_gen_binary (ASHIFTRT, mode,
1134 XEXP (op, 0), XEXP (op, 1));
1135
1136 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1137 if (GET_CODE (op) == XOR
1138 && XEXP (op, 1) == const1_rtx
1139 && nonzero_bits (XEXP (op, 0), mode) == 1)
1140 return plus_constant (mode, XEXP (op, 0), -1);
1141
1142 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1143 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1144 if (GET_CODE (op) == LT
1145 && XEXP (op, 1) == const0_rtx
1146 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1147 {
1148 machine_mode inner = GET_MODE (XEXP (op, 0));
1149 int isize = GET_MODE_PRECISION (inner);
1150 if (STORE_FLAG_VALUE == 1)
1151 {
1152 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1153 GEN_INT (isize - 1));
1154 if (mode == inner)
1155 return temp;
1156 if (GET_MODE_PRECISION (mode) > isize)
1157 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1158 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1159 }
1160 else if (STORE_FLAG_VALUE == -1)
1161 {
1162 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1163 GEN_INT (isize - 1));
1164 if (mode == inner)
1165 return temp;
1166 if (GET_MODE_PRECISION (mode) > isize)
1167 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1168 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1169 }
1170 }
1171 break;
1172
1173 case TRUNCATE:
1174 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1175 with the umulXi3_highpart patterns. */
1176 if (GET_CODE (op) == LSHIFTRT
1177 && GET_CODE (XEXP (op, 0)) == MULT)
1178 break;
1179
1180 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1181 {
1182 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1183 {
1184 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1185 if (temp)
1186 return temp;
1187 }
1188 /* We can't handle truncation to a partial integer mode here
1189 because we don't know the real bitsize of the partial
1190 integer mode. */
1191 break;
1192 }
1193
1194 if (GET_MODE (op) != VOIDmode)
1195 {
1196 temp = simplify_truncation (mode, op, GET_MODE (op));
1197 if (temp)
1198 return temp;
1199 }
1200
1201 /* If we know that the value is already truncated, we can
1202 replace the TRUNCATE with a SUBREG. */
1203 if (GET_MODE_NUNITS (mode) == 1
1204 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1205 || truncated_to_mode (mode, op)))
1206 {
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1210 }
1211
1212 /* A truncate of a comparison can be replaced with a subreg if
1213 STORE_FLAG_VALUE permits. This is like the previous test,
1214 but it works even if the comparison is done in a mode larger
1215 than HOST_BITS_PER_WIDE_INT. */
1216 if (HWI_COMPUTABLE_MODE_P (mode)
1217 && COMPARISON_P (op)
1218 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1219 {
1220 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1221 if (temp)
1222 return temp;
1223 }
1224
1225 /* A truncate of a memory is just loading the low part of the memory
1226 if we are not changing the meaning of the address. */
1227 if (GET_CODE (op) == MEM
1228 && !VECTOR_MODE_P (mode)
1229 && !MEM_VOLATILE_P (op)
1230 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1231 {
1232 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1233 if (temp)
1234 return temp;
1235 }
1236
1237 break;
1238
1239 case FLOAT_TRUNCATE:
1240 if (DECIMAL_FLOAT_MODE_P (mode))
1241 break;
1242
1243 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1244 if (GET_CODE (op) == FLOAT_EXTEND
1245 && GET_MODE (XEXP (op, 0)) == mode)
1246 return XEXP (op, 0);
1247
1248 /* (float_truncate:SF (float_truncate:DF foo:XF))
1249 = (float_truncate:SF foo:XF).
1250 This may eliminate double rounding, so it is unsafe and only done with -funsafe-math-optimizations.
1251
1252 (float_truncate:SF (float_extend:XF foo:DF))
1253 = (float_truncate:SF foo:DF).
1254
1255 (float_truncate:DF (float_extend:XF foo:SF))
1256 = (float_extend:DF foo:SF). */
1257 if ((GET_CODE (op) == FLOAT_TRUNCATE
1258 && flag_unsafe_math_optimizations)
1259 || GET_CODE (op) == FLOAT_EXTEND)
1260 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1261 0)))
1262 > GET_MODE_SIZE (mode)
1263 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1264 mode,
1265 XEXP (op, 0), mode);
1266
1267 /* (float_truncate (float x)) is (float x) */
1268 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1269 && (flag_unsafe_math_optimizations
1270 || exact_int_to_float_conversion_p (op)))
1271 return simplify_gen_unary (GET_CODE (op), mode,
1272 XEXP (op, 0),
1273 GET_MODE (XEXP (op, 0)));
1274
1275 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1276 (OP:SF foo:SF) if OP is NEG or ABS. */
1277 if ((GET_CODE (op) == ABS
1278 || GET_CODE (op) == NEG)
1279 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1280 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1281 return simplify_gen_unary (GET_CODE (op), mode,
1282 XEXP (XEXP (op, 0), 0), mode);
1283
1284 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1285 is (float_truncate:SF x). */
1286 if (GET_CODE (op) == SUBREG
1287 && subreg_lowpart_p (op)
1288 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1289 return SUBREG_REG (op);
1290 break;
1291
1292 case FLOAT_EXTEND:
1293 if (DECIMAL_FLOAT_MODE_P (mode))
1294 break;
1295
1296 /* (float_extend (float_extend x)) is (float_extend x)
1297
1298 (float_extend (float x)) is (float x) assuming that double
1299 rounding can't happen.
1300 */
1301 if (GET_CODE (op) == FLOAT_EXTEND
1302 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1303 && exact_int_to_float_conversion_p (op)))
1304 return simplify_gen_unary (GET_CODE (op), mode,
1305 XEXP (op, 0),
1306 GET_MODE (XEXP (op, 0)));
1307
1308 break;
1309
1310 case ABS:
1311 /* (abs (neg <foo>)) -> (abs <foo>) */
1312 if (GET_CODE (op) == NEG)
1313 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1315
1316 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1317 do nothing. */
1318 if (GET_MODE (op) == VOIDmode)
1319 break;
1320
1321 /* If operand is something known to be positive, ignore the ABS. */
1322 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1323 || val_signbit_known_clear_p (GET_MODE (op),
1324 nonzero_bits (op, GET_MODE (op))))
1325 return op;
1326
1327 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1328 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1329 return gen_rtx_NEG (mode, op);
1330
1331 break;
1332
1333 case FFS:
1334 /* (ffs (*_extend <X>)) = (ffs <X>) */
1335 if (GET_CODE (op) == SIGN_EXTEND
1336 || GET_CODE (op) == ZERO_EXTEND)
1337 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1338 GET_MODE (XEXP (op, 0)));
1339 break;
1340
1341 case POPCOUNT:
1342 switch (GET_CODE (op))
1343 {
1344 case BSWAP:
1345 case ZERO_EXTEND:
1346 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1347 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1348 GET_MODE (XEXP (op, 0)));
1349
1350 case ROTATE:
1351 case ROTATERT:
1352 /* Rotations don't affect popcount. */
1353 if (!side_effects_p (XEXP (op, 1)))
1354 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1355 GET_MODE (XEXP (op, 0)));
1356 break;
1357
1358 default:
1359 break;
1360 }
1361 break;
1362
1363 case PARITY:
1364 switch (GET_CODE (op))
1365 {
1366 case NOT:
1367 case BSWAP:
1368 case ZERO_EXTEND:
1369 case SIGN_EXTEND:
1370 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1371 GET_MODE (XEXP (op, 0)));
1372
1373 case ROTATE:
1374 case ROTATERT:
1375 /* Rotations don't affect parity. */
1376 if (!side_effects_p (XEXP (op, 1)))
1377 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1379 break;
1380
1381 default:
1382 break;
1383 }
1384 break;
1385
1386 case BSWAP:
1387 /* (bswap (bswap x)) -> x. */
1388 if (GET_CODE (op) == BSWAP)
1389 return XEXP (op, 0);
1390 break;
1391
1392 case FLOAT:
1393 /* (float (sign_extend <X>)) = (float <X>). */
1394 if (GET_CODE (op) == SIGN_EXTEND)
1395 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1396 GET_MODE (XEXP (op, 0)));
1397 break;
1398
1399 case SIGN_EXTEND:
1400 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1401 becomes just the MINUS if its mode is MODE. This allows
1402 folding switch statements on machines using casesi (such as
1403 the VAX). */
1404 if (GET_CODE (op) == TRUNCATE
1405 && GET_MODE (XEXP (op, 0)) == mode
1406 && GET_CODE (XEXP (op, 0)) == MINUS
1407 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1408 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1409 return XEXP (op, 0);
1410
1411 /* Extending a widening multiplication should be canonicalized to
1412 a wider widening multiplication. */
1413 if (GET_CODE (op) == MULT)
1414 {
1415 rtx lhs = XEXP (op, 0);
1416 rtx rhs = XEXP (op, 1);
1417 enum rtx_code lcode = GET_CODE (lhs);
1418 enum rtx_code rcode = GET_CODE (rhs);
1419
1420 /* Widening multiplies usually extend both operands, but sometimes
1421 they use a shift to extract a portion of a register. */
1422 if ((lcode == SIGN_EXTEND
1423 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1424 && (rcode == SIGN_EXTEND
1425 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1426 {
1427 machine_mode lmode = GET_MODE (lhs);
1428 machine_mode rmode = GET_MODE (rhs);
1429 int bits;
1430
1431 if (lcode == ASHIFTRT)
1432 /* Number of bits not shifted off the end. */
1433 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1434 else /* lcode == SIGN_EXTEND */
1435 /* Size of inner mode. */
1436 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1437
1438 if (rcode == ASHIFTRT)
1439 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1440 else /* rcode == SIGN_EXTEND */
1441 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1442
1443 /* We can only widen multiplies if the result is mathematically
1444 equivalent, i.e. if overflow was impossible. */
1445 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1446 return simplify_gen_binary
1447 (MULT, mode,
1448 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1449 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1450 }
1451 }
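/* For instance, (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
   (sign_extend:SI (reg:HI B)))) becomes
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))),
   since a 16-bit by 16-bit product cannot overflow SImode.  */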
1452
1453 /* Check for a sign extension of a subreg of a promoted
1454 variable, where the promotion is sign-extended, and the
1455 target mode is the same as the variable's promotion. */
1456 if (GET_CODE (op) == SUBREG
1457 && SUBREG_PROMOTED_VAR_P (op)
1458 && SUBREG_PROMOTED_SIGNED_P (op)
1459 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1460 {
1461 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1462 if (temp)
1463 return temp;
1464 }
1465
1466 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1467 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1468 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1469 {
1470 gcc_assert (GET_MODE_PRECISION (mode)
1471 > GET_MODE_PRECISION (GET_MODE (op)));
1472 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1473 GET_MODE (XEXP (op, 0)));
1474 }
1475
1476 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1477 is (sign_extend:M (subreg:O <X>)) if there is mode with
1478 GET_MODE_BITSIZE (N) - I bits.
1479 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1480 is similarly (zero_extend:M (subreg:O <X>)). */
1481 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1482 && GET_CODE (XEXP (op, 0)) == ASHIFT
1483 && CONST_INT_P (XEXP (op, 1))
1484 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1485 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1486 {
1487 machine_mode tmode
1488 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1489 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1490 gcc_assert (GET_MODE_BITSIZE (mode)
1491 > GET_MODE_BITSIZE (GET_MODE (op)));
1492 if (tmode != BLKmode)
1493 {
1494 rtx inner =
1495 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1496 if (inner)
1497 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1498 ? SIGN_EXTEND : ZERO_EXTEND,
1499 mode, inner, tmode);
1500 }
1501 }
1502
1503 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1504 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1505 if (GET_CODE (op) == LSHIFTRT
1506 && CONST_INT_P (XEXP (op, 1))
1507 && XEXP (op, 1) != const0_rtx)
1508 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1509
1510 #if defined(POINTERS_EXTEND_UNSIGNED)
1511 /* As we do not know which address space the pointer is referring to,
1512 we can do this only if the target does not support different pointer
1513 or address modes depending on the address space. */
1514 if (target_default_pointer_address_modes_p ()
1515 && ! POINTERS_EXTEND_UNSIGNED
1516 && mode == Pmode && GET_MODE (op) == ptr_mode
1517 && (CONSTANT_P (op)
1518 || (GET_CODE (op) == SUBREG
1519 && REG_P (SUBREG_REG (op))
1520 && REG_POINTER (SUBREG_REG (op))
1521 && GET_MODE (SUBREG_REG (op)) == Pmode))
1522 && !targetm.have_ptr_extend ())
1523 {
1524 temp
1525 = convert_memory_address_addr_space_1 (Pmode, op,
1526 ADDR_SPACE_GENERIC, false,
1527 true);
1528 if (temp)
1529 return temp;
1530 }
1531 #endif
1532 break;
1533
1534 case ZERO_EXTEND:
1535 /* Check for a zero extension of a subreg of a promoted
1536 variable, where the promotion is zero-extended, and the
1537 target mode is the same as the variable's promotion. */
1538 if (GET_CODE (op) == SUBREG
1539 && SUBREG_PROMOTED_VAR_P (op)
1540 && SUBREG_PROMOTED_UNSIGNED_P (op)
1541 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1542 {
1543 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1544 if (temp)
1545 return temp;
1546 }
1547
1548 /* Extending a widening multiplication should be canonicalized to
1549 a wider widening multiplication. */
1550 if (GET_CODE (op) == MULT)
1551 {
1552 rtx lhs = XEXP (op, 0);
1553 rtx rhs = XEXP (op, 1);
1554 enum rtx_code lcode = GET_CODE (lhs);
1555 enum rtx_code rcode = GET_CODE (rhs);
1556
1557 /* Widening multiplies usually extend both operands, but sometimes
1558 they use a shift to extract a portion of a register. */
1559 if ((lcode == ZERO_EXTEND
1560 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1561 && (rcode == ZERO_EXTEND
1562 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1563 {
1564 machine_mode lmode = GET_MODE (lhs);
1565 machine_mode rmode = GET_MODE (rhs);
1566 int bits;
1567
1568 if (lcode == LSHIFTRT)
1569 /* Number of bits not shifted off the end. */
1570 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1571 else /* lcode == ZERO_EXTEND */
1572 /* Size of inner mode. */
1573 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1574
1575 if (rcode == LSHIFTRT)
1576 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1577 else /* rcode == ZERO_EXTEND */
1578 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1579
1580 /* We can only widen multiplies if the result is mathematically
1581 equivalent, i.e. if overflow was impossible. */
1582 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1583 return simplify_gen_binary
1584 (MULT, mode,
1585 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1586 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1587 }
1588 }
1589
1590 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1591 if (GET_CODE (op) == ZERO_EXTEND)
1592 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1593 GET_MODE (XEXP (op, 0)));
1594
1595 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1596 is (zero_extend:M (subreg:O <X>)) if there is mode with
1597 GET_MODE_PRECISION (N) - I bits. */
1598 if (GET_CODE (op) == LSHIFTRT
1599 && GET_CODE (XEXP (op, 0)) == ASHIFT
1600 && CONST_INT_P (XEXP (op, 1))
1601 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1602 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1603 {
1604 machine_mode tmode
1605 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1606 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1607 if (tmode != BLKmode)
1608 {
1609 rtx inner =
1610 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1611 if (inner)
1612 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1613 }
1614 }
1615
1616 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1617 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1618 of mode N. E.g.
1619 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1620 (and:SI (reg:SI) (const_int 63)). */
1621 if (GET_CODE (op) == SUBREG
1622 && GET_MODE_PRECISION (GET_MODE (op))
1623 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1624 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1625 <= HOST_BITS_PER_WIDE_INT
1626 && GET_MODE_PRECISION (mode)
1627 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1628 && subreg_lowpart_p (op)
1629 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1630 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1631 {
1632 if (GET_MODE_PRECISION (mode)
1633 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1634 return SUBREG_REG (op);
1635 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1636 GET_MODE (SUBREG_REG (op)));
1637 }
1638
1639 #if defined(POINTERS_EXTEND_UNSIGNED)
1640 /* As we do not know which address space the pointer is referring to,
1641 we can do this only if the target does not support different pointer
1642 or address modes depending on the address space. */
1643 if (target_default_pointer_address_modes_p ()
1644 && POINTERS_EXTEND_UNSIGNED > 0
1645 && mode == Pmode && GET_MODE (op) == ptr_mode
1646 && (CONSTANT_P (op)
1647 || (GET_CODE (op) == SUBREG
1648 && REG_P (SUBREG_REG (op))
1649 && REG_POINTER (SUBREG_REG (op))
1650 && GET_MODE (SUBREG_REG (op)) == Pmode))
1651 && !targetm.have_ptr_extend ())
1652 {
1653 temp
1654 = convert_memory_address_addr_space_1 (Pmode, op,
1655 ADDR_SPACE_GENERIC, false,
1656 true);
1657 if (temp)
1658 return temp;
1659 }
1660 #endif
1661 break;
1662
1663 default:
1664 break;
1665 }
1666
1667 return 0;
1668 }
1669
1670 /* Try to compute the value of a unary operation CODE whose output mode is to
1671 be MODE with input operand OP whose mode was originally OP_MODE.
1672 Return zero if the value cannot be computed. */
1673 rtx
1674 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1675 rtx op, machine_mode op_mode)
1676 {
1677 unsigned int width = GET_MODE_PRECISION (mode);
1678
1679 if (code == VEC_DUPLICATE)
1680 {
1681 gcc_assert (VECTOR_MODE_P (mode));
1682 if (GET_MODE (op) != VOIDmode)
1683 {
1684 if (!VECTOR_MODE_P (GET_MODE (op)))
1685 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1686 else
1687 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1688 (GET_MODE (op)));
1689 }
1690 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1691 || GET_CODE (op) == CONST_VECTOR)
1692 {
1693 int elt_size = GET_MODE_UNIT_SIZE (mode);
1694 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1695 rtvec v = rtvec_alloc (n_elts);
1696 unsigned int i;
1697
1698 if (GET_CODE (op) != CONST_VECTOR)
1699 for (i = 0; i < n_elts; i++)
1700 RTVEC_ELT (v, i) = op;
1701 else
1702 {
1703 machine_mode inmode = GET_MODE (op);
1704 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1705 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1706
1707 gcc_assert (in_n_elts < n_elts);
1708 gcc_assert ((n_elts % in_n_elts) == 0);
1709 for (i = 0; i < n_elts; i++)
1710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1711 }
1712 return gen_rtx_CONST_VECTOR (mode, v);
1713 }
1714 }
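/* For example, a VEC_DUPLICATE of (const_int 5) in V4SImode folds to
   (const_vector:V4SI [(const_int 5) (const_int 5)
                       (const_int 5) (const_int 5)]).  */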
1715
1716 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1717 {
1718 int elt_size = GET_MODE_UNIT_SIZE (mode);
1719 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1720 machine_mode opmode = GET_MODE (op);
1721 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1722 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1723 rtvec v = rtvec_alloc (n_elts);
1724 unsigned int i;
1725
1726 gcc_assert (op_n_elts == n_elts);
1727 for (i = 0; i < n_elts; i++)
1728 {
1729 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1730 CONST_VECTOR_ELT (op, i),
1731 GET_MODE_INNER (opmode));
1732 if (!x)
1733 return 0;
1734 RTVEC_ELT (v, i) = x;
1735 }
1736 return gen_rtx_CONST_VECTOR (mode, v);
1737 }
1738
1739 /* The order of these tests is critical so that, for example, we don't
1740 check the wrong mode (input vs. output) for a conversion operation,
1741 such as FIX. At some point, this should be simplified. */
1742
1743 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1744 {
1745 REAL_VALUE_TYPE d;
1746
1747 if (op_mode == VOIDmode)
1748 {
1749 /* CONST_INTs have VOIDmode as their mode. We assume that all
1750 the bits of the constant are significant, though this is
1751 a dangerous assumption, as CONST_INTs are often created and
1752 used with garbage in the bits outside of the precision of
1753 the implied mode of the const_int. */
1754 op_mode = MAX_MODE_INT;
1755 }
1756
1757 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1758
1759 /* Avoid the folding if flag_signaling_nans is on and
1760 the operand is a signaling NaN. */
1761 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1762 return 0;
1763
1764 d = real_value_truncate (mode, d);
1765 return const_double_from_real_value (d, mode);
1766 }
1767 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1768 {
1769 REAL_VALUE_TYPE d;
1770
1771 if (op_mode == VOIDmode)
1772 {
1773 /* CONST_INTs have VOIDmode as their mode. We assume that all
1774 the bits of the constant are significant, though this is
1775 a dangerous assumption, as CONST_INTs are often created and
1776 used with garbage in the bits outside of the precision of
1777 the implied mode of the const_int. */
1778 op_mode = MAX_MODE_INT;
1779 }
1780
1781 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1782
1783 /* Avoid the folding if flag_signaling_nans is on and
1784 the operand is a signaling NaN. */
1785 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1786 return 0;
1787
1788 d = real_value_truncate (mode, d);
1789 return const_double_from_real_value (d, mode);
1790 }
1791
1792 if (CONST_SCALAR_INT_P (op) && width > 0)
1793 {
1794 wide_int result;
1795 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1796 rtx_mode_t op0 = rtx_mode_t (op, imode);
1797 int int_value;
1798
1799 #if TARGET_SUPPORTS_WIDE_INT == 0
1800 /* This assert keeps the simplification from producing a result
1801 that cannot be represented in a CONST_DOUBLE, but a lot of
1802 upstream callers expect that this function never fails to
1803 simplify something, so if you added this to the test above,
1804 the code would die later anyway. If this assert triggers,
1805 you just need to make the port support wide int. */
1806 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1807 #endif
1808
1809 switch (code)
1810 {
1811 case NOT:
1812 result = wi::bit_not (op0);
1813 break;
1814
1815 case NEG:
1816 result = wi::neg (op0);
1817 break;
1818
1819 case ABS:
1820 result = wi::abs (op0);
1821 break;
1822
1823 case FFS:
1824 result = wi::shwi (wi::ffs (op0), mode);
1825 break;
1826
1827 case CLZ:
1828 if (wi::ne_p (op0, 0))
1829 int_value = wi::clz (op0);
1830 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1831 int_value = GET_MODE_PRECISION (mode);
1832 result = wi::shwi (int_value, mode);
1833 break;
1834
1835 case CLRSB:
1836 result = wi::shwi (wi::clrsb (op0), mode);
1837 break;
1838
1839 case CTZ:
1840 if (wi::ne_p (op0, 0))
1841 int_value = wi::ctz (op0);
1842 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1843 int_value = GET_MODE_PRECISION (mode);
1844 result = wi::shwi (int_value, mode);
1845 break;
1846
1847 case POPCOUNT:
1848 result = wi::shwi (wi::popcount (op0), mode);
1849 break;
1850
1851 case PARITY:
1852 result = wi::shwi (wi::parity (op0), mode);
1853 break;
1854
1855 case BSWAP:
1856 result = wide_int (op0).bswap ();
1857 break;
1858
1859 case TRUNCATE:
1860 case ZERO_EXTEND:
1861 result = wide_int::from (op0, width, UNSIGNED);
1862 break;
1863
1864 case SIGN_EXTEND:
1865 result = wide_int::from (op0, width, SIGNED);
1866 break;
1867
1868 case SQRT:
1869 default:
1870 return 0;
1871 }
1872
1873 return immed_wide_int_const (result, mode);
1874 }
1875
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (mode)
1878 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1879 {
1880 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1881 switch (code)
1882 {
1883 case SQRT:
1884 return 0;
1885 case ABS:
1886 d = real_value_abs (&d);
1887 break;
1888 case NEG:
1889 d = real_value_negate (&d);
1890 break;
1891 case FLOAT_TRUNCATE:
1892 /* Don't perform the operation if flag_signaling_nans is on
1893 and the operand is a signaling NaN. */
1894 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1895 return NULL_RTX;
1896 d = real_value_truncate (mode, d);
1897 break;
1898 case FLOAT_EXTEND:
1899 /* Don't perform the operation if flag_signaling_nans is on
1900 and the operand is a signaling NaN. */
1901 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1902 return NULL_RTX;
1903 /* All this does is change the mode, unless we are
1904 changing the mode class. */
1905 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1906 real_convert (&d, mode, &d);
1907 break;
1908 case FIX:
1909 /* Don't perform the operation if flag_signaling_nans is on
1910 and the operand is a signaling NaN. */
1911 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1912 return NULL_RTX;
1913 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1914 break;
1915 case NOT:
1916 {
1917 long tmp[4];
1918 int i;
1919
1920 real_to_target (tmp, &d, GET_MODE (op));
1921 for (i = 0; i < 4; i++)
1922 tmp[i] = ~tmp[i];
1923 real_from_target (&d, tmp, mode);
1924 break;
1925 }
1926 default:
1927 gcc_unreachable ();
1928 }
1929 return const_double_from_real_value (d, mode);
1930 }
1931 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1933 && GET_MODE_CLASS (mode) == MODE_INT
1934 && width > 0)
1935 {
1936 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1937 operators are intentionally left unspecified (to ease implementation
1938 by target backends), for consistency, this routine implements the
1939 same semantics for constant folding as used by the middle-end. */
1940
1941 /* This was formerly used only for non-IEEE float.
1942 eggert@twinsun.com says it is safe for IEEE also. */
1943 REAL_VALUE_TYPE t;
1944 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1945 wide_int wmax, wmin;
1946 /* The FAIL argument is part of the ABI of real_to_integer, but we
1947 check the bounds ourselves before making this call. */
1948 bool fail;
1949
1950 switch (code)
1951 {
1952 case FIX:
1953 if (REAL_VALUE_ISNAN (*x))
1954 return const0_rtx;
1955
1956 /* Test against the signed upper bound. */
1957 wmax = wi::max_value (width, SIGNED);
1958 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1959 if (real_less (&t, x))
1960 return immed_wide_int_const (wmax, mode);
1961
1962 /* Test against the signed lower bound. */
1963 wmin = wi::min_value (width, SIGNED);
1964 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1965 if (real_less (x, &t))
1966 return immed_wide_int_const (wmin, mode);
1967
1968 return immed_wide_int_const (real_to_integer (x, &fail, width),
1969 mode);
1970
1971 case UNSIGNED_FIX:
1972 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1973 return const0_rtx;
1974
1975 /* Test against the unsigned upper bound. */
1976 wmax = wi::max_value (width, UNSIGNED);
1977 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1978 if (real_less (&t, x))
1979 return immed_wide_int_const (wmax, mode);
1980
1981 return immed_wide_int_const (real_to_integer (x, &fail, width),
1982 mode);
1983
1984 default:
1985 gcc_unreachable ();
1986 }
1987 }
1988
1989 return NULL_RTX;
1990 }
1991 \f
1992 /* Subroutine of simplify_binary_operation to simplify a binary operation
1993 CODE that can commute with byte swapping, with result mode MODE and
1994 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1995 Return zero if no simplification or canonicalization is possible. */
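/* For example, in SImode (and (bswap X) (const_int 0xff)) is rewritten as
   (bswap (and X C2)) where C2 is 0xff byte-swapped, i.e. 0xff000000.  */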
1996
1997 static rtx
1998 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1999 rtx op0, rtx op1)
2000 {
2001 rtx tem;
2002
2003 /* (op (bswap x) C1) -> (bswap (op x C2)) where C2 is C1 byte-swapped. */
2004 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2005 {
2006 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2007 simplify_gen_unary (BSWAP, mode, op1, mode));
2008 return simplify_gen_unary (BSWAP, mode, tem, mode);
2009 }
2010
2011 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2012 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2013 {
2014 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2015 return simplify_gen_unary (BSWAP, mode, tem, mode);
2016 }
2017
2018 return NULL_RTX;
2019 }
2020
2021 /* Subroutine of simplify_binary_operation to simplify a commutative,
2022 associative binary operation CODE with result mode MODE, operating
2023 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2024 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2025 canonicalization is possible. */
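/* For example, (and (and X (const_int 12)) (const_int 10)) is reassociated
   so that the constants fold together, giving (and X (const_int 8)).  */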
2026
2027 static rtx
2028 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2029 rtx op0, rtx op1)
2030 {
2031 rtx tem;
2032
2033 /* Linearize the operator to the left. */
2034 if (GET_CODE (op1) == code)
2035 {
2036 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
2037 if (GET_CODE (op0) == code)
2038 {
2039 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2040 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2041 }
2042
2043 /* "a op (b op c)" becomes "(b op c) op a". */
2044 if (! swap_commutative_operands_p (op1, op0))
2045 return simplify_gen_binary (code, mode, op1, op0);
2046
2047 std::swap (op0, op1);
2048 }
2049
2050 if (GET_CODE (op0) == code)
2051 {
2052 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2053 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2054 {
2055 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2056 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2057 }
2058
2059 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2060 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2061 if (tem != 0)
2062 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2063
2064 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2065 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2066 if (tem != 0)
2067 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2068 }
2069
2070 return 0;
2071 }
2072
2073
2074 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2075 and OP1. Return 0 if no simplification is possible.
2076
2077 Don't use this for relational operations such as EQ or LT.
2078 Use simplify_relational_operation instead. */
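/* For example, (plus:SI X (const_int 0)) simplifies to X, and
   (mult:SI X (const_int 0)) simplifies to (const_int 0) when X has
   no side effects.  */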
2079 rtx
2080 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2081 rtx op0, rtx op1)
2082 {
2083 rtx trueop0, trueop1;
2084 rtx tem;
2085
2086 /* Relational operations don't work here. We must know the mode
2087 of the operands in order to do the comparison correctly.
2088 Assuming a full word can give incorrect results.
2089 Consider comparing 128 with -128 in QImode. */
2090 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2091 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2092
2093 /* Make sure the constant is second. */
2094 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2095 && swap_commutative_operands_p (op0, op1))
2096 std::swap (op0, op1);
2097
2098 trueop0 = avoid_constant_pool_reference (op0);
2099 trueop1 = avoid_constant_pool_reference (op1);
2100
2101 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2102 if (tem)
2103 return tem;
2104 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2105
2106 if (tem)
2107 return tem;
2108
2109 /* If the above steps did not result in a simplification and op0 or op1
2110 were constant pool references, use the referenced constants directly. */
2111 if (trueop0 != op0 || trueop1 != op1)
2112 return simplify_gen_binary (code, mode, trueop0, trueop1);
2113
2114 return NULL_RTX;
2115 }
2116
2117 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2118 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2119 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2120 actual constants. */
2121
2122 static rtx
2123 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2124 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2125 {
2126 rtx tem, reversed, opleft, opright;
2127 HOST_WIDE_INT val;
2128 unsigned int width = GET_MODE_PRECISION (mode);
2129
2130 /* Even if we can't compute a constant result,
2131 there are some cases worth simplifying. */
2132
2133 switch (code)
2134 {
2135 case PLUS:
2136 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2137 when x is NaN, infinite, or finite and nonzero. They aren't
2138 when x is -0 and the rounding mode is not towards -infinity,
2139 since (-0) + 0 is then 0. */
2140 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2141 return op0;
2142
2143 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2144 transformations are safe even for IEEE. */
2145 if (GET_CODE (op0) == NEG)
2146 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2147 else if (GET_CODE (op1) == NEG)
2148 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2149
2150 /* (~a) + 1 -> -a */
2151 if (INTEGRAL_MODE_P (mode)
2152 && GET_CODE (op0) == NOT
2153 && trueop1 == const1_rtx)
2154 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2155
2156 /* Handle both-operands-constant cases. We can only add
2157 CONST_INTs to constants since the sum of relocatable symbols
2158 can't be handled by most assemblers. Don't add CONST_INT
2159 to CONST_INT since overflow won't be computed properly if wider
2160 than HOST_BITS_PER_WIDE_INT. */
2161
2162 if ((GET_CODE (op0) == CONST
2163 || GET_CODE (op0) == SYMBOL_REF
2164 || GET_CODE (op0) == LABEL_REF)
2165 && CONST_INT_P (op1))
2166 return plus_constant (mode, op0, INTVAL (op1));
2167 else if ((GET_CODE (op1) == CONST
2168 || GET_CODE (op1) == SYMBOL_REF
2169 || GET_CODE (op1) == LABEL_REF)
2170 && CONST_INT_P (op0))
2171 return plus_constant (mode, op1, INTVAL (op0));
2172
2173 /* See if this is something like X * C - X or vice versa or
2174 if the multiplication is written as a shift. If so, we can
2175 distribute and make a new multiply, shift, or maybe just
2176 have X (if C is 2 in the example above). But don't make
2177 something more expensive than we had before. */
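      /* For example, (plus (mult X (const_int 3)) X) becomes
         (mult X (const_int 4)), provided the new form is not judged
         more expensive by set_src_cost.  */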
2178
2179 if (SCALAR_INT_MODE_P (mode))
2180 {
2181 rtx lhs = op0, rhs = op1;
2182
2183 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2184 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2185
2186 if (GET_CODE (lhs) == NEG)
2187 {
2188 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2189 lhs = XEXP (lhs, 0);
2190 }
2191 else if (GET_CODE (lhs) == MULT
2192 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2193 {
2194 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2195 lhs = XEXP (lhs, 0);
2196 }
2197 else if (GET_CODE (lhs) == ASHIFT
2198 && CONST_INT_P (XEXP (lhs, 1))
2199 && INTVAL (XEXP (lhs, 1)) >= 0
2200 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2201 {
2202 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2203 GET_MODE_PRECISION (mode));
2204 lhs = XEXP (lhs, 0);
2205 }
2206
2207 if (GET_CODE (rhs) == NEG)
2208 {
2209 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2210 rhs = XEXP (rhs, 0);
2211 }
2212 else if (GET_CODE (rhs) == MULT
2213 && CONST_INT_P (XEXP (rhs, 1)))
2214 {
2215 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2216 rhs = XEXP (rhs, 0);
2217 }
2218 else if (GET_CODE (rhs) == ASHIFT
2219 && CONST_INT_P (XEXP (rhs, 1))
2220 && INTVAL (XEXP (rhs, 1)) >= 0
2221 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2222 {
2223 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2224 GET_MODE_PRECISION (mode));
2225 rhs = XEXP (rhs, 0);
2226 }
2227
2228 if (rtx_equal_p (lhs, rhs))
2229 {
2230 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2231 rtx coeff;
2232 bool speed = optimize_function_for_speed_p (cfun);
2233
2234 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2235
2236 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2237 return (set_src_cost (tem, mode, speed)
2238 <= set_src_cost (orig, mode, speed) ? tem : 0);
2239 }
2240 }
2241
2242 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2243 if (CONST_SCALAR_INT_P (op1)
2244 && GET_CODE (op0) == XOR
2245 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2246 && mode_signbit_p (mode, op1))
2247 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2248 simplify_gen_binary (XOR, mode, op1,
2249 XEXP (op0, 1)));
2250
2251 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2252 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2253 && GET_CODE (op0) == MULT
2254 && GET_CODE (XEXP (op0, 0)) == NEG)
2255 {
2256 rtx in1, in2;
2257
2258 in1 = XEXP (XEXP (op0, 0), 0);
2259 in2 = XEXP (op0, 1);
2260 return simplify_gen_binary (MINUS, mode, op1,
2261 simplify_gen_binary (MULT, mode,
2262 in1, in2));
2263 }
2264
2265 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2266 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2267 is 1. */
2268 if (COMPARISON_P (op0)
2269 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2270 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2271 && (reversed = reversed_comparison (op0, mode)))
2272 return
2273 simplify_gen_unary (NEG, mode, reversed, mode);
2274
2275 /* If one of the operands is a PLUS or a MINUS, see if we can
2276 simplify this by the associative law.
2277 Don't use the associative law for floating point.
2278 The inaccuracy makes it nonassociative,
2279 and subtle programs can break if operations are associated. */
2280
2281 if (INTEGRAL_MODE_P (mode)
2282 && (plus_minus_operand_p (op0)
2283 || plus_minus_operand_p (op1))
2284 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2285 return tem;
2286
2287 /* Reassociate floating point addition only when the user
2288 specifies associative math operations. */
2289 if (FLOAT_MODE_P (mode)
2290 && flag_associative_math)
2291 {
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (tem)
2294 return tem;
2295 }
2296 break;
2297
2298 case COMPARE:
2299 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2300 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2301 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2302 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2303 {
2304 rtx xop00 = XEXP (op0, 0);
2305 rtx xop10 = XEXP (op1, 0);
2306
2307 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2308 return xop00;
2309
2310 if (REG_P (xop00) && REG_P (xop10)
2311 && REGNO (xop00) == REGNO (xop10)
2312 && GET_MODE (xop00) == mode
2313 && GET_MODE (xop10) == mode
2314 && GET_MODE_CLASS (mode) == MODE_CC)
2315 return xop00;
2316 }
2317 break;
2318
2319 case MINUS:
2320 /* We can't assume x-x is 0 even with non-IEEE floating point,
2321 but since it is zero except in very strange circumstances, we
2322 will treat it as zero with -ffinite-math-only. */
2323 if (rtx_equal_p (trueop0, trueop1)
2324 && ! side_effects_p (op0)
2325 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2326 return CONST0_RTX (mode);
2327
2328 /* Change subtraction from zero into negation. (0 - x) is the
2329 same as -x when x is NaN, infinite, or finite and nonzero.
2330 But if the mode has signed zeros, and does not round towards
2331 -infinity, then 0 - 0 is 0, not -0. */
2332 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2333 return simplify_gen_unary (NEG, mode, op1, mode);
2334
2335 /* (-1 - a) is ~a, unless the expression contains symbolic
2336 constants, in which case not retaining additions and
2337 subtractions could cause invalid assembly to be produced. */
2338 if (trueop0 == constm1_rtx
2339 && !contains_symbolic_reference_p (op1))
2340 return simplify_gen_unary (NOT, mode, op1, mode);
2341
2342 /* Subtracting 0 has no effect unless the mode has signed zeros
2343 and supports rounding towards -infinity. In such a case,
2344 0 - 0 is -0. */
2345 if (!(HONOR_SIGNED_ZEROS (mode)
2346 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2347 && trueop1 == CONST0_RTX (mode))
2348 return op0;
2349
2350 /* See if this is something like X * C - X or vice versa or
2351 if the multiplication is written as a shift. If so, we can
2352 distribute and make a new multiply, shift, or maybe just
2353 have X (if C is 2 in the example above). But don't make
2354 something more expensive than we had before. */
2355
2356 if (SCALAR_INT_MODE_P (mode))
2357 {
2358 rtx lhs = op0, rhs = op1;
2359
2360 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2361 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2362
2363 if (GET_CODE (lhs) == NEG)
2364 {
2365 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2366 lhs = XEXP (lhs, 0);
2367 }
2368 else if (GET_CODE (lhs) == MULT
2369 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2370 {
2371 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2372 lhs = XEXP (lhs, 0);
2373 }
2374 else if (GET_CODE (lhs) == ASHIFT
2375 && CONST_INT_P (XEXP (lhs, 1))
2376 && INTVAL (XEXP (lhs, 1)) >= 0
2377 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2378 {
2379 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2380 GET_MODE_PRECISION (mode));
2381 lhs = XEXP (lhs, 0);
2382 }
2383
2384 if (GET_CODE (rhs) == NEG)
2385 {
2386 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2387 rhs = XEXP (rhs, 0);
2388 }
2389 else if (GET_CODE (rhs) == MULT
2390 && CONST_INT_P (XEXP (rhs, 1)))
2391 {
2392 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2393 rhs = XEXP (rhs, 0);
2394 }
2395 else if (GET_CODE (rhs) == ASHIFT
2396 && CONST_INT_P (XEXP (rhs, 1))
2397 && INTVAL (XEXP (rhs, 1)) >= 0
2398 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2399 {
2400 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2401 GET_MODE_PRECISION (mode));
2402 negcoeff1 = -negcoeff1;
2403 rhs = XEXP (rhs, 0);
2404 }
2405
2406 if (rtx_equal_p (lhs, rhs))
2407 {
2408 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2409 rtx coeff;
2410 bool speed = optimize_function_for_speed_p (cfun);
2411
2412 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2413
2414 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2415 return (set_src_cost (tem, mode, speed)
2416 <= set_src_cost (orig, mode, speed) ? tem : 0);
2417 }
2418 }
2419
2420 /* (a - (-b)) -> (a + b). True even for IEEE. */
2421 if (GET_CODE (op1) == NEG)
2422 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2423
2424 /* (-x - c) may be simplified as (-c - x). */
2425 if (GET_CODE (op0) == NEG
2426 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2427 {
2428 tem = simplify_unary_operation (NEG, mode, op1, mode);
2429 if (tem)
2430 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2431 }
2432
2433 /* Don't let a relocatable value get a negative coeff. */
2434 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2435 return simplify_gen_binary (PLUS, mode,
2436 op0,
2437 neg_const_int (mode, op1));
2438
2439 /* (x - (x & y)) -> (x & ~y) */
2440 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2441 {
2442 if (rtx_equal_p (op0, XEXP (op1, 0)))
2443 {
2444 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2445 GET_MODE (XEXP (op1, 1)));
2446 return simplify_gen_binary (AND, mode, op0, tem);
2447 }
2448 if (rtx_equal_p (op0, XEXP (op1, 1)))
2449 {
2450 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2451 GET_MODE (XEXP (op1, 0)));
2452 return simplify_gen_binary (AND, mode, op0, tem);
2453 }
2454 }
2455
2456 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2457 by reversing the comparison code if valid. */
2458 if (STORE_FLAG_VALUE == 1
2459 && trueop0 == const1_rtx
2460 && COMPARISON_P (op1)
2461 && (reversed = reversed_comparison (op1, mode)))
2462 return reversed;
2463
2464 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2465 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2466 && GET_CODE (op1) == MULT
2467 && GET_CODE (XEXP (op1, 0)) == NEG)
2468 {
2469 rtx in1, in2;
2470
2471 in1 = XEXP (XEXP (op1, 0), 0);
2472 in2 = XEXP (op1, 1);
2473 return simplify_gen_binary (PLUS, mode,
2474 simplify_gen_binary (MULT, mode,
2475 in1, in2),
2476 op0);
2477 }
2478
2479 /* Canonicalize (minus (neg A) (mult B C)) to
2480 (minus (mult (neg B) C) A). */
2481 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2482 && GET_CODE (op1) == MULT
2483 && GET_CODE (op0) == NEG)
2484 {
2485 rtx in1, in2;
2486
2487 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2488 in2 = XEXP (op1, 1);
2489 return simplify_gen_binary (MINUS, mode,
2490 simplify_gen_binary (MULT, mode,
2491 in1, in2),
2492 XEXP (op0, 0));
2493 }
2494
2495 /* If one of the operands is a PLUS or a MINUS, see if we can
2496 simplify this by the associative law. This will, for example,
2497 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2498 Don't use the associative law for floating point.
2499 The inaccuracy makes it nonassociative,
2500 and subtle programs can break if operations are associated. */
2501
2502 if (INTEGRAL_MODE_P (mode)
2503 && (plus_minus_operand_p (op0)
2504 || plus_minus_operand_p (op1))
2505 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2506 return tem;
2507 break;
2508
2509 case MULT:
2510 if (trueop1 == constm1_rtx)
2511 return simplify_gen_unary (NEG, mode, op0, mode);
2512
2513 if (GET_CODE (op0) == NEG)
2514 {
2515 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2516 /* If op1 is a MULT as well and simplify_unary_operation
2517 just moved the NEG to the second operand, simplify_gen_binary
2518 below could, through simplify_associative_operation, move
2519 the NEG around again and recurse endlessly. */
2520 if (temp
2521 && GET_CODE (op1) == MULT
2522 && GET_CODE (temp) == MULT
2523 && XEXP (op1, 0) == XEXP (temp, 0)
2524 && GET_CODE (XEXP (temp, 1)) == NEG
2525 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2526 temp = NULL_RTX;
2527 if (temp)
2528 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2529 }
2530 if (GET_CODE (op1) == NEG)
2531 {
2532 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2533 /* If op0 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could, through simplify_associative_operation, move
2536 the NEG around again and recurse endlessly. */
2537 if (temp
2538 && GET_CODE (op0) == MULT
2539 && GET_CODE (temp) == MULT
2540 && XEXP (op0, 0) == XEXP (temp, 0)
2541 && GET_CODE (XEXP (temp, 1)) == NEG
2542 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2543 temp = NULL_RTX;
2544 if (temp)
2545 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2546 }
2547
2548 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2549 x is NaN, since x * 0 is then also NaN. Nor is it valid
2550 when the mode has signed zeros, since multiplying a negative
2551 number by 0 will give -0, not 0. */
2552 if (!HONOR_NANS (mode)
2553 && !HONOR_SIGNED_ZEROS (mode)
2554 && trueop1 == CONST0_RTX (mode)
2555 && ! side_effects_p (op0))
2556 return op1;
2557
2558 /* In IEEE floating point, x*1 is not equivalent to x for
2559 signalling NaNs. */
2560 if (!HONOR_SNANS (mode)
2561 && trueop1 == CONST1_RTX (mode))
2562 return op0;
2563
2564 /* Convert multiply by constant power of two into shift. */
2565 if (CONST_SCALAR_INT_P (trueop1))
2566 {
2567 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2568 if (val >= 0)
2569 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2570 }
2571
2572 /* x*2 is x+x and x*(-1) is -x. */
2573 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2574 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2575 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2576 && GET_MODE (op0) == mode)
2577 {
2578 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2579
2580 if (real_equal (d1, &dconst2))
2581 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2582
2583 if (!HONOR_SNANS (mode)
2584 && real_equal (d1, &dconstm1))
2585 return simplify_gen_unary (NEG, mode, op0, mode);
2586 }
2587
2588 /* Optimize -x * -x as x * x. */
2589 if (FLOAT_MODE_P (mode)
2590 && GET_CODE (op0) == NEG
2591 && GET_CODE (op1) == NEG
2592 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2593 && !side_effects_p (XEXP (op0, 0)))
2594 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2595
2596 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2597 if (SCALAR_FLOAT_MODE_P (mode)
2598 && GET_CODE (op0) == ABS
2599 && GET_CODE (op1) == ABS
2600 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2601 && !side_effects_p (XEXP (op0, 0)))
2602 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2603
2604 /* Reassociate multiplication, but for floating point MULTs
2605 only when the user specifies unsafe math optimizations. */
2606 if (! FLOAT_MODE_P (mode)
2607 || flag_unsafe_math_optimizations)
2608 {
2609 tem = simplify_associative_operation (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2612 }
2613 break;
2614
2615 case IOR:
2616 if (trueop1 == CONST0_RTX (mode))
2617 return op0;
2618 if (INTEGRAL_MODE_P (mode)
2619 && trueop1 == CONSTM1_RTX (mode)
2620 && !side_effects_p (op0))
2621 return op1;
2622 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2623 return op0;
2624 /* A | (~A) -> -1 */
2625 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2626 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2627 && ! side_effects_p (op0)
2628 && SCALAR_INT_MODE_P (mode))
2629 return constm1_rtx;
2630
2631 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2632 if (CONST_INT_P (op1)
2633 && HWI_COMPUTABLE_MODE_P (mode)
2634 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2635 && !side_effects_p (op0))
2636 return op1;
2637
2638 /* Canonicalize (X & C1) | C2. */
2639 if (GET_CODE (op0) == AND
2640 && CONST_INT_P (trueop1)
2641 && CONST_INT_P (XEXP (op0, 1)))
2642 {
2643 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2644 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2645 HOST_WIDE_INT c2 = INTVAL (trueop1);
2646
2647 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2648 if ((c1 & c2) == c1
2649 && !side_effects_p (XEXP (op0, 0)))
2650 return trueop1;
2651
2652 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2653 if (((c1|c2) & mask) == mask)
2654 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2655
2656 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2657 if (((c1 & ~c2) & mask) != (c1 & mask))
2658 {
2659 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2660 gen_int_mode (c1 & ~c2, mode));
2661 return simplify_gen_binary (IOR, mode, tem, op1);
2662 }
2663 }
2664
2665 /* Convert (A & B) | A to A. */
2666 if (GET_CODE (op0) == AND
2667 && (rtx_equal_p (XEXP (op0, 0), op1)
2668 || rtx_equal_p (XEXP (op0, 1), op1))
2669 && ! side_effects_p (XEXP (op0, 0))
2670 && ! side_effects_p (XEXP (op0, 1)))
2671 return op1;
2672
2673 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2674 mode size to (rotate A CX). */
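      /* For example, in SImode (ior (ashift A (const_int 24))
         (lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)),
         since 24 + 8 equals the mode precision.  */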
2675
2676 if (GET_CODE (op1) == ASHIFT
2677 || GET_CODE (op1) == SUBREG)
2678 {
2679 opleft = op1;
2680 opright = op0;
2681 }
2682 else
2683 {
2684 opright = op1;
2685 opleft = op0;
2686 }
2687
2688 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2689 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2690 && CONST_INT_P (XEXP (opleft, 1))
2691 && CONST_INT_P (XEXP (opright, 1))
2692 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2693 == GET_MODE_PRECISION (mode)))
2694 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2695
2696 /* Same, but for ashift that has been "simplified" to a wider mode
2697 by simplify_shift_const. */
2698
2699 if (GET_CODE (opleft) == SUBREG
2700 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2701 && GET_CODE (opright) == LSHIFTRT
2702 && GET_CODE (XEXP (opright, 0)) == SUBREG
2703 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2704 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2705 && (GET_MODE_SIZE (GET_MODE (opleft))
2706 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2707 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2708 SUBREG_REG (XEXP (opright, 0)))
2709 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2710 && CONST_INT_P (XEXP (opright, 1))
2711 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2712 == GET_MODE_PRECISION (mode)))
2713 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2714 XEXP (SUBREG_REG (opleft), 1));
2715
2716 /* If we have (ior (and X C1) C2), simplify this by making
2717 C1 as small as possible if C1 actually changes. */
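      /* For example, (ior (and X (const_int 7)) (const_int 3)) becomes
         (ior (and X (const_int 4)) (const_int 3)).  */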
2718 if (CONST_INT_P (op1)
2719 && (HWI_COMPUTABLE_MODE_P (mode)
2720 || INTVAL (op1) > 0)
2721 && GET_CODE (op0) == AND
2722 && CONST_INT_P (XEXP (op0, 1))
2723 && CONST_INT_P (op1)
2724 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2725 {
2726 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2727 gen_int_mode (UINTVAL (XEXP (op0, 1))
2728 & ~UINTVAL (op1),
2729 mode));
2730 return simplify_gen_binary (IOR, mode, tmp, op1);
2731 }
2732
2733 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2734 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2735 the PLUS does not affect any of the bits in OP1, we can do
2736 the IOR as a PLUS and then associate. This is valid if OP1
2737 can be safely shifted left C bits. */
2738 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2739 && GET_CODE (XEXP (op0, 0)) == PLUS
2740 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2741 && CONST_INT_P (XEXP (op0, 1))
2742 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2743 {
2744 int count = INTVAL (XEXP (op0, 1));
2745 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2746
2747 if (mask >> count == INTVAL (trueop1)
2748 && trunc_int_for_mode (mask, mode) == mask
2749 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2750 return simplify_gen_binary (ASHIFTRT, mode,
2751 plus_constant (mode, XEXP (op0, 0),
2752 mask),
2753 XEXP (op0, 1));
2754 }
2755
2756 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2757 if (tem)
2758 return tem;
2759
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2764
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2774
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
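      /* For example, in QImode (xor X (const_int -128)) becomes
         (plus X (const_int -128)).  */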
2776 if (CONST_SCALAR_INT_P (op1)
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if (CONST_SCALAR_INT_P (op1)
2781 && GET_CODE (op0) == PLUS
2782 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2783 && mode_signbit_p (mode, XEXP (op0, 1)))
2784 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2785 simplify_gen_binary (XOR, mode, op1,
2786 XEXP (op0, 1)));
2787
2788 /* If we are XORing two things that have no bits in common,
2789 convert them into an IOR. This helps to detect rotation encoded
2790 using those methods and possibly other simplifications. */
2791
2792 if (HWI_COMPUTABLE_MODE_P (mode)
2793 && (nonzero_bits (op0, mode)
2794 & nonzero_bits (op1, mode)) == 0)
2795 return (simplify_gen_binary (IOR, mode, op0, op1));
2796
2797 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2798 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2799 (NOT y). */
2800 {
2801 int num_negated = 0;
2802
2803 if (GET_CODE (op0) == NOT)
2804 num_negated++, op0 = XEXP (op0, 0);
2805 if (GET_CODE (op1) == NOT)
2806 num_negated++, op1 = XEXP (op1, 0);
2807
2808 if (num_negated == 2)
2809 return simplify_gen_binary (XOR, mode, op0, op1);
2810 else if (num_negated == 1)
2811 return simplify_gen_unary (NOT, mode,
2812 simplify_gen_binary (XOR, mode, op0, op1),
2813 mode);
2814 }
2815
2816 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2817 correspond to a machine insn or result in further simplifications
2818 if B is a constant. */
2819
2820 if (GET_CODE (op0) == AND
2821 && rtx_equal_p (XEXP (op0, 1), op1)
2822 && ! side_effects_p (op1))
2823 return simplify_gen_binary (AND, mode,
2824 simplify_gen_unary (NOT, mode,
2825 XEXP (op0, 0), mode),
2826 op1);
2827
2828 else if (GET_CODE (op0) == AND
2829 && rtx_equal_p (XEXP (op0, 0), op1)
2830 && ! side_effects_p (op1))
2831 return simplify_gen_binary (AND, mode,
2832 simplify_gen_unary (NOT, mode,
2833 XEXP (op0, 1), mode),
2834 op1);
2835
2836 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2837 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2838 out bits inverted twice and not set by C. Similarly, given
2839 (xor (and (xor A B) C) D), simplify without inverting C in
2840 the xor operand: (xor (and A C) (B&C)^D).
2841 */
2842 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2843 && GET_CODE (XEXP (op0, 0)) == XOR
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2847 {
2848 enum rtx_code op = GET_CODE (op0);
2849 rtx a = XEXP (XEXP (op0, 0), 0);
2850 rtx b = XEXP (XEXP (op0, 0), 1);
2851 rtx c = XEXP (op0, 1);
2852 rtx d = op1;
2853 HOST_WIDE_INT bval = INTVAL (b);
2854 HOST_WIDE_INT cval = INTVAL (c);
2855 HOST_WIDE_INT dval = INTVAL (d);
2856 HOST_WIDE_INT xcval;
2857
2858 if (op == IOR)
2859 xcval = ~cval;
2860 else
2861 xcval = cval;
2862
2863 return simplify_gen_binary (XOR, mode,
2864 simplify_gen_binary (op, mode, a, c),
2865 gen_int_mode ((bval & xcval) ^ dval,
2866 mode));
2867 }
2868
2869 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2870 we can transform like this:
2871 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2872 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2873 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2874 Attempt a few simplifications when B and C are both constants. */
2875 if (GET_CODE (op0) == AND
2876 && CONST_INT_P (op1)
2877 && CONST_INT_P (XEXP (op0, 1)))
2878 {
2879 rtx a = XEXP (op0, 0);
2880 rtx b = XEXP (op0, 1);
2881 rtx c = op1;
2882 HOST_WIDE_INT bval = INTVAL (b);
2883 HOST_WIDE_INT cval = INTVAL (c);
2884
2885 /* Instead of computing ~A&C, we compute its negated value,
2886 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2887 optimize for sure. If it does not simplify, we still try
2888 to compute ~A&C below, but since that always allocates
2889 RTL, we don't try that before committing to returning a
2890 simplified expression. */
2891 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2892 GEN_INT (~cval));
2893
2894 if ((~cval & bval) == 0)
2895 {
2896 rtx na_c = NULL_RTX;
2897 if (n_na_c)
2898 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2899 else
2900 {
2901 /* If ~A does not simplify, don't bother: we don't
2902 want to simplify 2 operations into 3, and if na_c
2903 were to simplify with na, n_na_c would have
2904 simplified as well. */
2905 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2906 if (na)
2907 na_c = simplify_gen_binary (AND, mode, na, c);
2908 }
2909
2910 /* Try to simplify ~A&C | ~B&C. */
2911 if (na_c != NULL_RTX)
2912 return simplify_gen_binary (IOR, mode, na_c,
2913 gen_int_mode (~bval & cval, mode));
2914 }
2915 else
2916 {
2917 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2918 if (n_na_c == CONSTM1_RTX (mode))
2919 {
2920 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2921 gen_int_mode (~cval & bval,
2922 mode));
2923 return simplify_gen_binary (IOR, mode, a_nc_b,
2924 gen_int_mode (~bval & cval,
2925 mode));
2926 }
2927 }
2928 }
2929
2930 /* If we have (xor (and (xor A B) C) A) with C a constant, we can
2931 instead do (ior (and A ~C) (and B C)), which is a machine instruction
2932 on some machines and also has a shorter instruction path length. */
2933 if (GET_CODE (op0) == AND
2934 && GET_CODE (XEXP (op0, 0)) == XOR
2935 && CONST_INT_P (XEXP (op0, 1))
2936 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2937 {
2938 rtx a = trueop1;
2939 rtx b = XEXP (XEXP (op0, 0), 1);
2940 rtx c = XEXP (op0, 1);
2941 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2942 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2943 rtx bc = simplify_gen_binary (AND, mode, b, c);
2944 return simplify_gen_binary (IOR, mode, a_nc, bc);
2945 }
2946 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2947 else if (GET_CODE (op0) == AND
2948 && GET_CODE (XEXP (op0, 0)) == XOR
2949 && CONST_INT_P (XEXP (op0, 1))
2950 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2951 {
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = trueop1;
2954 rtx c = XEXP (op0, 1);
2955 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2956 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2957 rtx ac = simplify_gen_binary (AND, mode, a, c);
2958 return simplify_gen_binary (IOR, mode, ac, b_nc);
2959 }
2960
2961 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2962 comparison if STORE_FLAG_VALUE is 1. */
2963 if (STORE_FLAG_VALUE == 1
2964 && trueop1 == const1_rtx
2965 && COMPARISON_P (op0)
2966 && (reversed = reversed_comparison (op0, mode)))
2967 return reversed;
2968
2969 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2970 is (lt foo (const_int 0)), so we can perform the above
2971 simplification if STORE_FLAG_VALUE is 1. */
2972
2973 if (STORE_FLAG_VALUE == 1
2974 && trueop1 == const1_rtx
2975 && GET_CODE (op0) == LSHIFTRT
2976 && CONST_INT_P (XEXP (op0, 1))
2977 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2978 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2979
2980 /* (xor (comparison foo bar) (const_int sign-bit))
2981 when STORE_FLAG_VALUE is the sign bit. */
2982 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2983 && trueop1 == const_true_rtx
2984 && COMPARISON_P (op0)
2985 && (reversed = reversed_comparison (op0, mode)))
2986 return reversed;
2987
2988 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2989 if (tem)
2990 return tem;
2991
2992 tem = simplify_associative_operation (code, mode, op0, op1);
2993 if (tem)
2994 return tem;
2995 break;
2996
2997 case AND:
2998 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2999 return trueop1;
3000 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3001 return op0;
3002 if (HWI_COMPUTABLE_MODE_P (mode))
3003 {
3004 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3005 HOST_WIDE_INT nzop1;
3006 if (CONST_INT_P (trueop1))
3007 {
3008 HOST_WIDE_INT val1 = INTVAL (trueop1);
3009 /* If we are turning off bits already known off in OP0, we need
3010 not do an AND. */
3011 if ((nzop0 & ~val1) == 0)
3012 return op0;
3013 }
3014 nzop1 = nonzero_bits (trueop1, mode);
3015 /* If we are clearing all the nonzero bits, the result is zero. */
3016 if ((nzop1 & nzop0) == 0
3017 && !side_effects_p (op0) && !side_effects_p (op1))
3018 return CONST0_RTX (mode);
3019 }
3020 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3021 && GET_MODE_CLASS (mode) != MODE_CC)
3022 return op0;
3023 /* A & (~A) -> 0 */
3024 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3025 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3026 && ! side_effects_p (op0)
3027 && GET_MODE_CLASS (mode) != MODE_CC)
3028 return CONST0_RTX (mode);
3029
3030 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3031 there are no nonzero bits of C outside of X's mode. */
3032 if ((GET_CODE (op0) == SIGN_EXTEND
3033 || GET_CODE (op0) == ZERO_EXTEND)
3034 && CONST_INT_P (trueop1)
3035 && HWI_COMPUTABLE_MODE_P (mode)
3036 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3037 & UINTVAL (trueop1)) == 0)
3038 {
3039 machine_mode imode = GET_MODE (XEXP (op0, 0));
3040 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3041 gen_int_mode (INTVAL (trueop1),
3042 imode));
3043 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3044 }
3045
3046 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3047 we might be able to further simplify the AND with X and potentially
3048 remove the truncation altogether. */
3049 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3050 {
3051 rtx x = XEXP (op0, 0);
3052 machine_mode xmode = GET_MODE (x);
3053 tem = simplify_gen_binary (AND, xmode, x,
3054 gen_int_mode (INTVAL (trueop1), xmode));
3055 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3056 }
3057
3058 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3059 if (GET_CODE (op0) == IOR
3060 && CONST_INT_P (trueop1)
3061 && CONST_INT_P (XEXP (op0, 1)))
3062 {
3063 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3064 return simplify_gen_binary (IOR, mode,
3065 simplify_gen_binary (AND, mode,
3066 XEXP (op0, 0), op1),
3067 gen_int_mode (tmp, mode));
3068 }
3069
3070 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3071 insn (and may simplify more). */
3072 if (GET_CODE (op0) == XOR
3073 && rtx_equal_p (XEXP (op0, 0), op1)
3074 && ! side_effects_p (op1))
3075 return simplify_gen_binary (AND, mode,
3076 simplify_gen_unary (NOT, mode,
3077 XEXP (op0, 1), mode),
3078 op1);
3079
3080 if (GET_CODE (op0) == XOR
3081 && rtx_equal_p (XEXP (op0, 1), op1)
3082 && ! side_effects_p (op1))
3083 return simplify_gen_binary (AND, mode,
3084 simplify_gen_unary (NOT, mode,
3085 XEXP (op0, 0), mode),
3086 op1);
3087
3088 /* Similarly for (~(A ^ B)) & A. */
3089 if (GET_CODE (op0) == NOT
3090 && GET_CODE (XEXP (op0, 0)) == XOR
3091 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3092 && ! side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3094
3095 if (GET_CODE (op0) == NOT
3096 && GET_CODE (XEXP (op0, 0)) == XOR
3097 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3098 && ! side_effects_p (op1))
3099 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3100
3101 /* Convert (A | B) & A to A. */
3102 if (GET_CODE (op0) == IOR
3103 && (rtx_equal_p (XEXP (op0, 0), op1)
3104 || rtx_equal_p (XEXP (op0, 1), op1))
3105 && ! side_effects_p (XEXP (op0, 0))
3106 && ! side_effects_p (XEXP (op0, 1)))
3107 return op1;
3108
3109 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3110 ((A & N) + B) & M -> (A + B) & M
3111 Similarly if (N & M) == 0,
3112 ((A | N) + B) & M -> (A + B) & M
3113 and for - instead of + and/or ^ instead of |.
3114 Also, if (N & M) == 0, then
3115 (A +- N) & M -> A & M. */
3116 if (CONST_INT_P (trueop1)
3117 && HWI_COMPUTABLE_MODE_P (mode)
3118 && ~UINTVAL (trueop1)
3119 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3120 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3121 {
3122 rtx pmop[2];
3123 int which;
3124
3125 pmop[0] = XEXP (op0, 0);
3126 pmop[1] = XEXP (op0, 1);
3127
3128 if (CONST_INT_P (pmop[1])
3129 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3130 return simplify_gen_binary (AND, mode, pmop[0], op1);
3131
3132 for (which = 0; which < 2; which++)
3133 {
3134 tem = pmop[which];
3135 switch (GET_CODE (tem))
3136 {
3137 case AND:
3138 if (CONST_INT_P (XEXP (tem, 1))
3139 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3140 == UINTVAL (trueop1))
3141 pmop[which] = XEXP (tem, 0);
3142 break;
3143 case IOR:
3144 case XOR:
3145 if (CONST_INT_P (XEXP (tem, 1))
3146 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3147 pmop[which] = XEXP (tem, 0);
3148 break;
3149 default:
3150 break;
3151 }
3152 }
3153
3154 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3155 {
3156 tem = simplify_gen_binary (GET_CODE (op0), mode,
3157 pmop[0], pmop[1]);
3158 return simplify_gen_binary (code, mode, tem, op1);
3159 }
3160 }
3161
3162 /* (and X (ior (not X) Y)) -> (and X Y) */
3163 if (GET_CODE (op1) == IOR
3164 && GET_CODE (XEXP (op1, 0)) == NOT
3165 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3166 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3167
3168 /* (and (ior (not X) Y) X) -> (and X Y) */
3169 if (GET_CODE (op0) == IOR
3170 && GET_CODE (XEXP (op0, 0)) == NOT
3171 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3172 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3173
3174 /* (and X (ior Y (not X))) -> (and X Y) */
3175 if (GET_CODE (op1) == IOR
3176 && GET_CODE (XEXP (op1, 1)) == NOT
3177 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3178 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3179
3180 /* (and (ior Y (not X)) X) -> (and X Y) */
3181 if (GET_CODE (op0) == IOR
3182 && GET_CODE (XEXP (op0, 1)) == NOT
3183 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3184 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3185
3186 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3187 if (tem)
3188 return tem;
3189
3190 tem = simplify_associative_operation (code, mode, op0, op1);
3191 if (tem)
3192 return tem;
3193 break;
3194
3195 case UDIV:
3196 /* 0/x is 0 (or x&0 if x has side-effects). */
3197 if (trueop0 == CONST0_RTX (mode))
3198 {
3199 if (side_effects_p (op1))
3200 return simplify_gen_binary (AND, mode, op1, trueop0);
3201 return trueop0;
3202 }
3203 /* x/1 is x. */
3204 if (trueop1 == CONST1_RTX (mode))
3205 {
3206 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3207 if (tem)
3208 return tem;
3209 }
3210 /* Convert divide by power of two into shift. */
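      /* For example, (udiv X (const_int 8)) becomes
         (lshiftrt X (const_int 3)).  */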
3211 if (CONST_INT_P (trueop1)
3212 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3213 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3214 break;
3215
3216 case DIV:
3217 /* Handle floating point and integers separately. */
3218 if (SCALAR_FLOAT_MODE_P (mode))
3219 {
3220 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3221 safe for modes with NaNs, since 0.0 / 0.0 will then be
3222 NaN rather than 0.0. Nor is it safe for modes with signed
3223 zeros, since dividing 0 by a negative number gives -0.0. */
3224 if (trueop0 == CONST0_RTX (mode)
3225 && !HONOR_NANS (mode)
3226 && !HONOR_SIGNED_ZEROS (mode)
3227 && ! side_effects_p (op1))
3228 return op0;
3229 /* x/1.0 is x. */
3230 if (trueop1 == CONST1_RTX (mode)
3231 && !HONOR_SNANS (mode))
3232 return op0;
3233
3234 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3235 && trueop1 != CONST0_RTX (mode))
3236 {
3237 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3238
3239 /* x/-1.0 is -x. */
3240 if (real_equal (d1, &dconstm1)
3241 && !HONOR_SNANS (mode))
3242 return simplify_gen_unary (NEG, mode, op0, mode);
3243
3244 /* Change FP division by a constant into multiplication.
3245 Only do this with -freciprocal-math. */
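              /* For example, with -freciprocal-math, x/2.0 becomes x*0.5,
                 since 0.5 is exactly representable.  */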
3246 if (flag_reciprocal_math
3247 && !real_equal (d1, &dconst0))
3248 {
3249 REAL_VALUE_TYPE d;
3250 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3251 tem = const_double_from_real_value (d, mode);
3252 return simplify_gen_binary (MULT, mode, op0, tem);
3253 }
3254 }
3255 }
3256 else if (SCALAR_INT_MODE_P (mode))
3257 {
3258 /* 0/x is 0 (or x&0 if x has side-effects). */
3259 if (trueop0 == CONST0_RTX (mode)
3260 && !cfun->can_throw_non_call_exceptions)
3261 {
3262 if (side_effects_p (op1))
3263 return simplify_gen_binary (AND, mode, op1, trueop0);
3264 return trueop0;
3265 }
3266 /* x/1 is x. */
3267 if (trueop1 == CONST1_RTX (mode))
3268 {
3269 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3270 if (tem)
3271 return tem;
3272 }
3273 /* x/-1 is -x. */
3274 if (trueop1 == constm1_rtx)
3275 {
3276 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3277 if (x)
3278 return simplify_gen_unary (NEG, mode, x, mode);
3279 }
3280 }
3281 break;
3282
3283 case UMOD:
3284 /* 0%x is 0 (or x&0 if x has side-effects). */
3285 if (trueop0 == CONST0_RTX (mode))
3286 {
3287 if (side_effects_p (op1))
3288 return simplify_gen_binary (AND, mode, op1, trueop0);
3289 return trueop0;
3290 }
3291 /* x%1 is 0 (or x&0 if x has side-effects). */
3292 if (trueop1 == CONST1_RTX (mode))
3293 {
3294 if (side_effects_p (op0))
3295 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3296 return CONST0_RTX (mode);
3297 }
3298 /* Implement modulus by power of two as AND. */
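      /* For example, (umod X (const_int 16)) becomes
         (and X (const_int 15)).  */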
3299 if (CONST_INT_P (trueop1)
3300 && exact_log2 (UINTVAL (trueop1)) > 0)
3301 return simplify_gen_binary (AND, mode, op0,
3302 gen_int_mode (INTVAL (op1) - 1, mode));
3303 break;
3304
3305 case MOD:
3306 /* 0%x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0 == CONST0_RTX (mode))
3308 {
3309 if (side_effects_p (op1))
3310 return simplify_gen_binary (AND, mode, op1, trueop0);
3311 return trueop0;
3312 }
3313 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3314 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3315 {
3316 if (side_effects_p (op0))
3317 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3318 return CONST0_RTX (mode);
3319 }
3320 break;
3321
3322 case ROTATERT:
3323 case ROTATE:
3324 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3325 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3326 bitsize - 1, use the other rotate direction with an amount of
3327 1 .. bitsize / 2 - 1 instead. */
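      /* For example, in SImode (rotate X (const_int 31)) becomes
         (rotatert X (const_int 1)) when the target has both rotate
         and rotatert.  */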
3328 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3329 if (CONST_INT_P (trueop1)
3330 && IN_RANGE (INTVAL (trueop1),
3331 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3332 GET_MODE_PRECISION (mode) - 1))
3333 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3334 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3335 - INTVAL (trueop1)));
3336 #endif
3337 /* FALLTHRU */
3338 case ASHIFTRT:
3339 if (trueop1 == CONST0_RTX (mode))
3340 return op0;
3341 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3342 return op0;
3343 /* Rotating ~0 always results in ~0. */
3344 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3345 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3346 && ! side_effects_p (op1))
3347 return op0;
3348
3349 canonicalize_shift:
3350 /* Given:
3351 scalar modes M1, M2
3352 scalar constants c1, c2
3353 size (M2) > size (M1)
3354 c1 == size (M2) - size (M1)
3355 optimize:
3356 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3357 <low_part>)
3358 (const_int <c2>))
3359 to:
3360 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3361 <low_part>). */
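      /* For example, with M2 == DImode, M1 == SImode and c1 == 32:
         (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32))
                       <low_part>)
                      (const_int 5))
         becomes
         (subreg:SI (lshiftrt:DI (reg:DI) (const_int 37)) <low_part>).  */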
3362 if ((code == ASHIFTRT || code == LSHIFTRT)
3363 && !VECTOR_MODE_P (mode)
3364 && SUBREG_P (op0)
3365 && CONST_INT_P (op1)
3366 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3367 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3368 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3369 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3370 > GET_MODE_BITSIZE (mode))
3371 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3372 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3373 - GET_MODE_BITSIZE (mode)))
3374 && subreg_lowpart_p (op0))
3375 {
3376 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3377 + INTVAL (op1));
3378 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3379 tmp = simplify_gen_binary (code,
3380 GET_MODE (SUBREG_REG (op0)),
3381 XEXP (SUBREG_REG (op0), 0),
3382 tmp);
3383 return lowpart_subreg (mode, tmp, inner_mode);
3384 }
3385
3386 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3387 {
3388 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3389 if (val != INTVAL (op1))
3390 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3391 }
3392 break;
3393
3394 case ASHIFT:
3395 case SS_ASHIFT:
3396 case US_ASHIFT:
3397 if (trueop1 == CONST0_RTX (mode))
3398 return op0;
3399 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3400 return op0;
3401 goto canonicalize_shift;
3402
3403 case LSHIFTRT:
3404 if (trueop1 == CONST0_RTX (mode))
3405 return op0;
3406 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3407 return op0;
3408 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3409 if (GET_CODE (op0) == CLZ
3410 && CONST_INT_P (trueop1)
3411 && STORE_FLAG_VALUE == 1
3412 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3413 {
3414 machine_mode imode = GET_MODE (XEXP (op0, 0));
3415 unsigned HOST_WIDE_INT zero_val = 0;
3416
3417 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3418 && zero_val == GET_MODE_PRECISION (imode)
3419 && INTVAL (trueop1) == exact_log2 (zero_val))
3420 return simplify_gen_relational (EQ, mode, imode,
3421 XEXP (op0, 0), const0_rtx);
3422 }
3423 goto canonicalize_shift;
3424
3425 case SMIN:
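      /* (smin X C) is C when C is MODE's most negative value, i.e. the
         sign bit alone; likewise (smin X X) is X.  */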
3426 if (width <= HOST_BITS_PER_WIDE_INT
3427 && mode_signbit_p (mode, trueop1)
3428 && ! side_effects_p (op0))
3429 return op1;
3430 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3431 return op0;
3432 tem = simplify_associative_operation (code, mode, op0, op1);
3433 if (tem)
3434 return tem;
3435 break;
3436
3437 case SMAX:
3438 if (width <= HOST_BITS_PER_WIDE_INT
3439 && CONST_INT_P (trueop1)
3440 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3441 && ! side_effects_p (op0))
3442 return op1;
3443 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3444 return op0;
3445 tem = simplify_associative_operation (code, mode, op0, op1);
3446 if (tem)
3447 return tem;
3448 break;
3449
3450 case UMIN:
3451 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3452 return op1;
3453 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3454 return op0;
3455 tem = simplify_associative_operation (code, mode, op0, op1);
3456 if (tem)
3457 return tem;
3458 break;
3459
3460 case UMAX:
3461 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3462 return op1;
3463 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3464 return op0;
3465 tem = simplify_associative_operation (code, mode, op0, op1);
3466 if (tem)
3467 return tem;
3468 break;
3469
3470 case SS_PLUS:
3471 case US_PLUS:
3472 case SS_MINUS:
3473 case US_MINUS:
3474 case SS_MULT:
3475 case US_MULT:
3476 case SS_DIV:
3477 case US_DIV:
3478 /* ??? There are simplifications that can be done. */
3479 return 0;
3480
3481 case VEC_SELECT:
3482 if (!VECTOR_MODE_P (mode))
3483 {
3484 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3485 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3486 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3487 gcc_assert (XVECLEN (trueop1, 0) == 1);
3488 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3489
3490 if (GET_CODE (trueop0) == CONST_VECTOR)
3491 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3492 (trueop1, 0, 0)));
3493
3494 /* Extract a scalar element from a nested VEC_SELECT expression
3495 (with an optional nested VEC_CONCAT expression). Some targets
3496 (i386) extract a scalar element from a vector using a chain of
3497 nested VEC_SELECT expressions. When the input operand is a memory
3498 operand, this operation can be simplified to a simple scalar
3499 load from an offset memory address. */
3500 if (GET_CODE (trueop0) == VEC_SELECT)
3501 {
3502 rtx op0 = XEXP (trueop0, 0);
3503 rtx op1 = XEXP (trueop0, 1);
3504
3505 machine_mode opmode = GET_MODE (op0);
3506 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3507 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3508
3509 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3510 int elem;
3511
3512 rtvec vec;
3513 rtx tmp_op, tmp;
3514
3515 gcc_assert (GET_CODE (op1) == PARALLEL);
3516 gcc_assert (i < n_elts);
3517
3518 /* Select the element pointed to by the nested selector. */
3519 elem = INTVAL (XVECEXP (op1, 0, i));
3520
3521 /* Handle the case where the nested VEC_SELECT wraps a VEC_CONCAT. */
3522 if (GET_CODE (op0) == VEC_CONCAT)
3523 {
3524 rtx op00 = XEXP (op0, 0);
3525 rtx op01 = XEXP (op0, 1);
3526
3527 machine_mode mode00, mode01;
3528 int n_elts00, n_elts01;
3529
3530 mode00 = GET_MODE (op00);
3531 mode01 = GET_MODE (op01);
3532
3533 /* Find the number of elements in each operand. */
3534 if (VECTOR_MODE_P (mode00))
3535 {
3536 elt_size = GET_MODE_UNIT_SIZE (mode00);
3537 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3538 }
3539 else
3540 n_elts00 = 1;
3541
3542 if (VECTOR_MODE_P (mode01))
3543 {
3544 elt_size = GET_MODE_UNIT_SIZE (mode01);
3545 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3546 }
3547 else
3548 n_elts01 = 1;
3549
3550 gcc_assert (n_elts == n_elts00 + n_elts01);
3551
3552 /* Select correct operand of VEC_CONCAT
3553 and adjust selector. */
3554 if (elem < n_elts01)
3555 tmp_op = op00;
3556 else
3557 {
3558 tmp_op = op01;
3559 elem -= n_elts00;
3560 }
3561 }
3562 else
3563 tmp_op = op0;
3564
3565 vec = rtvec_alloc (1);
3566 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3567
3568 tmp = gen_rtx_fmt_ee (code, mode,
3569 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3570 return tmp;
3571 }
3572 if (GET_CODE (trueop0) == VEC_DUPLICATE
3573 && GET_MODE (XEXP (trueop0, 0)) == mode)
3574 return XEXP (trueop0, 0);
3575 }
3576 else
3577 {
3578 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3579 gcc_assert (GET_MODE_INNER (mode)
3580 == GET_MODE_INNER (GET_MODE (trueop0)));
3581 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3582
3583 if (GET_CODE (trueop0) == CONST_VECTOR)
3584 {
3585 int elt_size = GET_MODE_UNIT_SIZE (mode);
3586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3587 rtvec v = rtvec_alloc (n_elts);
3588 unsigned int i;
3589
3590 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3591 for (i = 0; i < n_elts; i++)
3592 {
3593 rtx x = XVECEXP (trueop1, 0, i);
3594
3595 gcc_assert (CONST_INT_P (x));
3596 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3597 INTVAL (x));
3598 }
3599
3600 return gen_rtx_CONST_VECTOR (mode, v);
3601 }
3602
3603 /* Recognize the identity. */
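/* E.g. a VEC_SELECT of a V4SI operand with selector (0 1 2 3) and
   result mode V4SI is just the operand itself.  (Illustrative; any
   vector mode with an in-order full-length selector qualifies.)  */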
3604 if (GET_MODE (trueop0) == mode)
3605 {
3606 bool maybe_ident = true;
3607 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3608 {
3609 rtx j = XVECEXP (trueop1, 0, i);
3610 if (!CONST_INT_P (j) || INTVAL (j) != i)
3611 {
3612 maybe_ident = false;
3613 break;
3614 }
3615 }
3616 if (maybe_ident)
3617 return trueop0;
3618 }
3619
3620 /* If we build {a,b} then permute it, build the result directly. */
3621 if (XVECLEN (trueop1, 0) == 2
3622 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3623 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3624 && GET_CODE (trueop0) == VEC_CONCAT
3625 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3626 && GET_MODE (XEXP (trueop0, 0)) == mode
3627 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3628 && GET_MODE (XEXP (trueop0, 1)) == mode)
3629 {
3630 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3631 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3632 rtx subop0, subop1;
3633
3634 gcc_assert (i0 < 4 && i1 < 4);
3635 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3636 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3637
3638 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3639 }
3640
3641 if (XVECLEN (trueop1, 0) == 2
3642 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3643 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3644 && GET_CODE (trueop0) == VEC_CONCAT
3645 && GET_MODE (trueop0) == mode)
3646 {
3647 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3648 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3649 rtx subop0, subop1;
3650
3651 gcc_assert (i0 < 2 && i1 < 2);
3652 subop0 = XEXP (trueop0, i0);
3653 subop1 = XEXP (trueop0, i1);
3654
3655 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3656 }
3657
3658 /* If we select one half of a vec_concat, return that. */
3659 if (GET_CODE (trueop0) == VEC_CONCAT
3660 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3661 {
3662 rtx subop0 = XEXP (trueop0, 0);
3663 rtx subop1 = XEXP (trueop0, 1);
3664 machine_mode mode0 = GET_MODE (subop0);
3665 machine_mode mode1 = GET_MODE (subop1);
3666 int li = GET_MODE_UNIT_SIZE (mode0);
3667 int l0 = GET_MODE_SIZE (mode0) / li;
3668 int l1 = GET_MODE_SIZE (mode1) / li;
3669 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3670 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3671 {
3672 bool success = true;
3673 for (int i = 1; i < l0; ++i)
3674 {
3675 rtx j = XVECEXP (trueop1, 0, i);
3676 if (!CONST_INT_P (j) || INTVAL (j) != i)
3677 {
3678 success = false;
3679 break;
3680 }
3681 }
3682 if (success)
3683 return subop0;
3684 }
3685 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3686 {
3687 bool success = true;
3688 for (int i = 1; i < l1; ++i)
3689 {
3690 rtx j = XVECEXP (trueop1, 0, i);
3691 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3692 {
3693 success = false;
3694 break;
3695 }
3696 }
3697 if (success)
3698 return subop1;
3699 }
3700 }
3701 }
3702
3703 if (XVECLEN (trueop1, 0) == 1
3704 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3705 && GET_CODE (trueop0) == VEC_CONCAT)
3706 {
3707 rtx vec = trueop0;
3708 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3709
3710 /* Try to find the element in the VEC_CONCAT. */
3711 while (GET_MODE (vec) != mode
3712 && GET_CODE (vec) == VEC_CONCAT)
3713 {
3714 HOST_WIDE_INT vec_size;
3715
3716 if (CONST_INT_P (XEXP (vec, 0)))
3717 {
3718 /* vec_concat of two const_ints doesn't make sense with
3719 respect to modes. */
3720 if (CONST_INT_P (XEXP (vec, 1)))
3721 return 0;
3722
3723 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3724 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3725 }
3726 else
3727 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3728
3729 if (offset < vec_size)
3730 vec = XEXP (vec, 0);
3731 else
3732 {
3733 offset -= vec_size;
3734 vec = XEXP (vec, 1);
3735 }
3736 vec = avoid_constant_pool_reference (vec);
3737 }
3738
3739 if (GET_MODE (vec) == mode)
3740 return vec;
3741 }
3742
3743 /* If we select elements in a vec_merge that all come from the same
3744 operand, select from that operand directly. */
3745 if (GET_CODE (op0) == VEC_MERGE)
3746 {
3747 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3748 if (CONST_INT_P (trueop02))
3749 {
3750 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3751 bool all_operand0 = true;
3752 bool all_operand1 = true;
3753 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3754 {
3755 rtx j = XVECEXP (trueop1, 0, i);
3756 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3757 all_operand1 = false;
3758 else
3759 all_operand0 = false;
3760 }
3761 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3762 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3763 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3764 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3765 }
3766 }
3767
3768 /* If we have two nested selects that are inverses of each
3769 other, replace them with the source operand. */
3770 if (GET_CODE (trueop0) == VEC_SELECT
3771 && GET_MODE (XEXP (trueop0, 0)) == mode)
3772 {
3773 rtx op0_subop1 = XEXP (trueop0, 1);
3774 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3775 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3776
3777 /* Apply the outer ordering vector to the inner one. (The inner
3778 ordering vector is expressly permitted to be of a different
3779 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3780 then the two VEC_SELECTs cancel. */
3781 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3782 {
3783 rtx x = XVECEXP (trueop1, 0, i);
3784 if (!CONST_INT_P (x))
3785 return 0;
3786 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3787 if (!CONST_INT_P (y) || i != INTVAL (y))
3788 return 0;
3789 }
3790 return XEXP (trueop0, 0);
3791 }
3792
3793 return 0;
3794 case VEC_CONCAT:
3795 {
3796 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3797 ? GET_MODE (trueop0)
3798 : GET_MODE_INNER (mode));
3799 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3800 ? GET_MODE (trueop1)
3801 : GET_MODE_INNER (mode));
3802
3803 gcc_assert (VECTOR_MODE_P (mode));
3804 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3805 == GET_MODE_SIZE (mode));
3806
3807 if (VECTOR_MODE_P (op0_mode))
3808 gcc_assert (GET_MODE_INNER (mode)
3809 == GET_MODE_INNER (op0_mode));
3810 else
3811 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3812
3813 if (VECTOR_MODE_P (op1_mode))
3814 gcc_assert (GET_MODE_INNER (mode)
3815 == GET_MODE_INNER (op1_mode));
3816 else
3817 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3818
3819 if ((GET_CODE (trueop0) == CONST_VECTOR
3820 || CONST_SCALAR_INT_P (trueop0)
3821 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3822 && (GET_CODE (trueop1) == CONST_VECTOR
3823 || CONST_SCALAR_INT_P (trueop1)
3824 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3825 {
3826 int elt_size = GET_MODE_UNIT_SIZE (mode);
3827 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3828 rtvec v = rtvec_alloc (n_elts);
3829 unsigned int i;
3830 unsigned in_n_elts = 1;
3831
3832 if (VECTOR_MODE_P (op0_mode))
3833 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3834 for (i = 0; i < n_elts; i++)
3835 {
3836 if (i < in_n_elts)
3837 {
3838 if (!VECTOR_MODE_P (op0_mode))
3839 RTVEC_ELT (v, i) = trueop0;
3840 else
3841 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3842 }
3843 else
3844 {
3845 if (!VECTOR_MODE_P (op1_mode))
3846 RTVEC_ELT (v, i) = trueop1;
3847 else
3848 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3849 i - in_n_elts);
3850 }
3851 }
3852
3853 return gen_rtx_CONST_VECTOR (mode, v);
3854 }
3855
3856 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3857 Restrict the transformation to avoid generating a VEC_SELECT with a
3858 mode unrelated to its operand. */
3859 if (GET_CODE (trueop0) == VEC_SELECT
3860 && GET_CODE (trueop1) == VEC_SELECT
3861 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3862 && GET_MODE (XEXP (trueop0, 0)) == mode)
3863 {
3864 rtx par0 = XEXP (trueop0, 1);
3865 rtx par1 = XEXP (trueop1, 1);
3866 int len0 = XVECLEN (par0, 0);
3867 int len1 = XVECLEN (par1, 0);
3868 rtvec vec = rtvec_alloc (len0 + len1);
3869 for (int i = 0; i < len0; i++)
3870 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3871 for (int i = 0; i < len1; i++)
3872 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3873 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3874 gen_rtx_PARALLEL (VOIDmode, vec));
3875 }
3876 }
3877 return 0;
3878
3879 default:
3880 gcc_unreachable ();
3881 }
3882
3883 return 0;
3884 }
3885
3886 rtx
3887 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3888 rtx op0, rtx op1)
3889 {
3890 unsigned int width = GET_MODE_PRECISION (mode);
3891
3892 if (VECTOR_MODE_P (mode)
3893 && code != VEC_CONCAT
3894 && GET_CODE (op0) == CONST_VECTOR
3895 && GET_CODE (op1) == CONST_VECTOR)
3896 {
3897 unsigned n_elts = GET_MODE_NUNITS (mode);
3898 machine_mode op0mode = GET_MODE (op0);
3899 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3900 machine_mode op1mode = GET_MODE (op1);
3901 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3902 rtvec v = rtvec_alloc (n_elts);
3903 unsigned int i;
3904
3905 gcc_assert (op0_n_elts == n_elts);
3906 gcc_assert (op1_n_elts == n_elts);
3907 for (i = 0; i < n_elts; i++)
3908 {
3909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3910 CONST_VECTOR_ELT (op0, i),
3911 CONST_VECTOR_ELT (op1, i));
3912 if (!x)
3913 return 0;
3914 RTVEC_ELT (v, i) = x;
3915 }
3916
3917 return gen_rtx_CONST_VECTOR (mode, v);
3918 }
3919
3920 if (VECTOR_MODE_P (mode)
3921 && code == VEC_CONCAT
3922 && (CONST_SCALAR_INT_P (op0)
3923 || GET_CODE (op0) == CONST_FIXED
3924 || CONST_DOUBLE_AS_FLOAT_P (op0))
3925 && (CONST_SCALAR_INT_P (op1)
3926 || CONST_DOUBLE_AS_FLOAT_P (op1)
3927 || GET_CODE (op1) == CONST_FIXED))
3928 {
3929 unsigned n_elts = GET_MODE_NUNITS (mode);
3930 rtvec v = rtvec_alloc (n_elts);
3931
3932 gcc_assert (n_elts >= 2);
3933 if (n_elts == 2)
3934 {
3935 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3936 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3937
3938 RTVEC_ELT (v, 0) = op0;
3939 RTVEC_ELT (v, 1) = op1;
3940 }
3941 else
3942 {
3943 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3944 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3945 unsigned i;
3946
3947 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3948 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3949 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3950
3951 for (i = 0; i < op0_n_elts; ++i)
3952 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3953 for (i = 0; i < op1_n_elts; ++i)
3954 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3955 }
3956
3957 return gen_rtx_CONST_VECTOR (mode, v);
3958 }
3959
3960 if (SCALAR_FLOAT_MODE_P (mode)
3961 && CONST_DOUBLE_AS_FLOAT_P (op0)
3962 && CONST_DOUBLE_AS_FLOAT_P (op1)
3963 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3964 {
3965 if (code == AND
3966 || code == IOR
3967 || code == XOR)
3968 {
3969 long tmp0[4];
3970 long tmp1[4];
3971 REAL_VALUE_TYPE r;
3972 int i;
3973
3974 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3975 GET_MODE (op0));
3976 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3977 GET_MODE (op1));
3978 for (i = 0; i < 4; i++)
3979 {
3980 switch (code)
3981 {
3982 case AND:
3983 tmp0[i] &= tmp1[i];
3984 break;
3985 case IOR:
3986 tmp0[i] |= tmp1[i];
3987 break;
3988 case XOR:
3989 tmp0[i] ^= tmp1[i];
3990 break;
3991 default:
3992 gcc_unreachable ();
3993 }
3994 }
3995 real_from_target (&r, tmp0, mode);
3996 return const_double_from_real_value (r, mode);
3997 }
3998 else
3999 {
4000 REAL_VALUE_TYPE f0, f1, value, result;
4001 const REAL_VALUE_TYPE *opr0, *opr1;
4002 bool inexact;
4003
4004 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4005 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4006
4007 if (HONOR_SNANS (mode)
4008 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4009 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4010 return 0;
4011
4012 real_convert (&f0, mode, opr0);
4013 real_convert (&f1, mode, opr1);
4014
4015 if (code == DIV
4016 && real_equal (&f1, &dconst0)
4017 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4018 return 0;
4019
4020 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4021 && flag_trapping_math
4022 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4023 {
4024 int s0 = REAL_VALUE_NEGATIVE (f0);
4025 int s1 = REAL_VALUE_NEGATIVE (f1);
4026
4027 switch (code)
4028 {
4029 case PLUS:
4030 /* Inf + -Inf = NaN plus exception. */
4031 if (s0 != s1)
4032 return 0;
4033 break;
4034 case MINUS:
4035 /* Inf - Inf = NaN plus exception. */
4036 if (s0 == s1)
4037 return 0;
4038 break;
4039 case DIV:
4040 /* Inf / Inf = NaN plus exception. */
4041 return 0;
4042 default:
4043 break;
4044 }
4045 }
4046
4047 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4048 && flag_trapping_math
4049 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4050 || (REAL_VALUE_ISINF (f1)
4051 && real_equal (&f0, &dconst0))))
4052 /* Inf * 0 = NaN plus exception. */
4053 return 0;
4054
4055 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4056 &f0, &f1);
4057 real_convert (&result, mode, &value);
4058
4059 /* Don't constant fold this floating point operation if
4060 the result has overflowed and flag_trapping_math. */
4061
4062 if (flag_trapping_math
4063 && MODE_HAS_INFINITIES (mode)
4064 && REAL_VALUE_ISINF (result)
4065 && !REAL_VALUE_ISINF (f0)
4066 && !REAL_VALUE_ISINF (f1))
4067 /* Overflow plus exception. */
4068 return 0;
4069
4070 /* Don't constant fold this floating point operation if the
4071 result may depend upon the run-time rounding mode and
4072 flag_rounding_math is set, or if GCC's software emulation
4073 is unable to accurately represent the result. */
4074
4075 if ((flag_rounding_math
4076 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4077 && (inexact || !real_identical (&result, &value)))
4078 return NULL_RTX;
4079
4080 return const_double_from_real_value (result, mode);
4081 }
4082 }
4083
4084 /* We can fold some multi-word operations. */
4085 if ((GET_MODE_CLASS (mode) == MODE_INT
4086 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4087 && CONST_SCALAR_INT_P (op0)
4088 && CONST_SCALAR_INT_P (op1))
4089 {
4090 wide_int result;
4091 bool overflow;
4092 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4093 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4094
4095 #if TARGET_SUPPORTS_WIDE_INT == 0
4096 /* This assert keeps the simplification from producing a result
4097 that cannot be represented in a CONST_DOUBLE, but a lot of
4098 upstream callers expect that this function never fails to
4099 simplify something, so if you added this to the test above,
4100 the code would die later anyway. If this assert triggers,
4101 you just need to make the port support wide int. */
4102 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4103 #endif
4104 switch (code)
4105 {
4106 case MINUS:
4107 result = wi::sub (pop0, pop1);
4108 break;
4109
4110 case PLUS:
4111 result = wi::add (pop0, pop1);
4112 break;
4113
4114 case MULT:
4115 result = wi::mul (pop0, pop1);
4116 break;
4117
4118 case DIV:
4119 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4120 if (overflow)
4121 return NULL_RTX;
4122 break;
4123
4124 case MOD:
4125 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4126 if (overflow)
4127 return NULL_RTX;
4128 break;
4129
4130 case UDIV:
4131 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4132 if (overflow)
4133 return NULL_RTX;
4134 break;
4135
4136 case UMOD:
4137 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4138 if (overflow)
4139 return NULL_RTX;
4140 break;
4141
4142 case AND:
4143 result = wi::bit_and (pop0, pop1);
4144 break;
4145
4146 case IOR:
4147 result = wi::bit_or (pop0, pop1);
4148 break;
4149
4150 case XOR:
4151 result = wi::bit_xor (pop0, pop1);
4152 break;
4153
4154 case SMIN:
4155 result = wi::smin (pop0, pop1);
4156 break;
4157
4158 case SMAX:
4159 result = wi::smax (pop0, pop1);
4160 break;
4161
4162 case UMIN:
4163 result = wi::umin (pop0, pop1);
4164 break;
4165
4166 case UMAX:
4167 result = wi::umax (pop0, pop1);
4168 break;
4169
4170 case LSHIFTRT:
4171 case ASHIFTRT:
4172 case ASHIFT:
4173 {
4174 wide_int wop1 = pop1;
4175 if (SHIFT_COUNT_TRUNCATED)
4176 wop1 = wi::umod_trunc (wop1, width);
4177 else if (wi::geu_p (wop1, width))
4178 return NULL_RTX;
4179
4180 switch (code)
4181 {
4182 case LSHIFTRT:
4183 result = wi::lrshift (pop0, wop1);
4184 break;
4185
4186 case ASHIFTRT:
4187 result = wi::arshift (pop0, wop1);
4188 break;
4189
4190 case ASHIFT:
4191 result = wi::lshift (pop0, wop1);
4192 break;
4193
4194 default:
4195 gcc_unreachable ();
4196 }
4197 break;
4198 }
4199 case ROTATE:
4200 case ROTATERT:
4201 {
4202 if (wi::neg_p (pop1))
4203 return NULL_RTX;
4204
4205 switch (code)
4206 {
4207 case ROTATE:
4208 result = wi::lrotate (pop0, pop1);
4209 break;
4210
4211 case ROTATERT:
4212 result = wi::rrotate (pop0, pop1);
4213 break;
4214
4215 default:
4216 gcc_unreachable ();
4217 }
4218 break;
4219 }
4220 default:
4221 return NULL_RTX;
4222 }
4223 return immed_wide_int_const (result, mode);
4224 }
4225
4226 return NULL_RTX;
4227 }
4228
4229
4230 \f
4231 /* Return a positive integer if X should sort after Y. The value
4232 returned is 1 if and only if X and Y are both regs. */
4233
4234 static int
4235 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4236 {
4237 int result;
4238
4239 result = (commutative_operand_precedence (y)
4240 - commutative_operand_precedence (x));
4241 if (result)
4242 return result + result;
4243
4244 /* Group together equal REGs to do more simplification. */
4245 if (REG_P (x) && REG_P (y))
4246 return REGNO (x) > REGNO (y);
4247
4248 return 0;
4249 }
4250
4251 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4252 operands may be another PLUS or MINUS.
4253
4254 Rather than testing for specific cases, we do this by a brute-force method
4255 and do all possible simplifications until no more changes occur. Then
4256 we rebuild the operation.
4257
4258 May return NULL_RTX when no changes were made. */
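/* As an illustrative sketch, (plus (minus (reg A) (reg B))
   (plus (reg C) (reg B))) expands to the operand list A, -B, C, B;
   the B and -B entries cancel during pairwise simplification and the
   result is rebuilt as (plus (reg A) (reg C)).  The exact outcome also
   depends on operand sorting and the canonicalization checks below.  */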
4259
4260 static rtx
4261 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4262 rtx op1)
4263 {
4264 struct simplify_plus_minus_op_data
4265 {
4266 rtx op;
4267 short neg;
4268 } ops[16];
4269 rtx result, tem;
4270 int n_ops = 2;
4271 int changed, n_constants, canonicalized = 0;
4272 int i, j;
4273
4274 memset (ops, 0, sizeof ops);
4275
4276 /* Set up the two operands and then expand them until nothing has been
4277 changed. If we run out of room in our array, give up; this should
4278 almost never happen. */
4279
4280 ops[0].op = op0;
4281 ops[0].neg = 0;
4282 ops[1].op = op1;
4283 ops[1].neg = (code == MINUS);
4284
4285 do
4286 {
4287 changed = 0;
4288 n_constants = 0;
4289
4290 for (i = 0; i < n_ops; i++)
4291 {
4292 rtx this_op = ops[i].op;
4293 int this_neg = ops[i].neg;
4294 enum rtx_code this_code = GET_CODE (this_op);
4295
4296 switch (this_code)
4297 {
4298 case PLUS:
4299 case MINUS:
4300 if (n_ops == ARRAY_SIZE (ops))
4301 return NULL_RTX;
4302
4303 ops[n_ops].op = XEXP (this_op, 1);
4304 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4305 n_ops++;
4306
4307 ops[i].op = XEXP (this_op, 0);
4308 changed = 1;
4309 /* If this operand was negated then we will potentially
4310 canonicalize the expression. Similarly, if we don't
4311 place the operands adjacent, we're re-ordering the
4312 expression and thus might be performing a
4313 canonicalization. Ignore register re-ordering.
4314 ??? It might be better to shuffle the ops array here,
4315 but then (plus (plus (A, B), plus (C, D))) wouldn't
4316 be seen as non-canonical. */
4317 if (this_neg
4318 || (i != n_ops - 2
4319 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4320 canonicalized = 1;
4321 break;
4322
4323 case NEG:
4324 ops[i].op = XEXP (this_op, 0);
4325 ops[i].neg = ! this_neg;
4326 changed = 1;
4327 canonicalized = 1;
4328 break;
4329
4330 case CONST:
4331 if (n_ops != ARRAY_SIZE (ops)
4332 && GET_CODE (XEXP (this_op, 0)) == PLUS
4333 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4334 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4335 {
4336 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4337 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4338 ops[n_ops].neg = this_neg;
4339 n_ops++;
4340 changed = 1;
4341 canonicalized = 1;
4342 }
4343 break;
4344
4345 case NOT:
4346 /* ~a -> (-a - 1) */
4347 if (n_ops != ARRAY_SIZE (ops))
4348 {
4349 ops[n_ops].op = CONSTM1_RTX (mode);
4350 ops[n_ops++].neg = this_neg;
4351 ops[i].op = XEXP (this_op, 0);
4352 ops[i].neg = !this_neg;
4353 changed = 1;
4354 canonicalized = 1;
4355 }
4356 break;
4357
4358 case CONST_INT:
4359 n_constants++;
4360 if (this_neg)
4361 {
4362 ops[i].op = neg_const_int (mode, this_op);
4363 ops[i].neg = 0;
4364 changed = 1;
4365 canonicalized = 1;
4366 }
4367 break;
4368
4369 default:
4370 break;
4371 }
4372 }
4373 }
4374 while (changed);
4375
4376 if (n_constants > 1)
4377 canonicalized = 1;
4378
4379 gcc_assert (n_ops >= 2);
4380
4381 /* If we only have two operands, we can avoid the loops. */
4382 if (n_ops == 2)
4383 {
4384 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4385 rtx lhs, rhs;
4386
4387 /* Get the two operands. Be careful with the order, especially for
4388 the cases where code == MINUS. */
4389 if (ops[0].neg && ops[1].neg)
4390 {
4391 lhs = gen_rtx_NEG (mode, ops[0].op);
4392 rhs = ops[1].op;
4393 }
4394 else if (ops[0].neg)
4395 {
4396 lhs = ops[1].op;
4397 rhs = ops[0].op;
4398 }
4399 else
4400 {
4401 lhs = ops[0].op;
4402 rhs = ops[1].op;
4403 }
4404
4405 return simplify_const_binary_operation (code, mode, lhs, rhs);
4406 }
4407
4408 /* Now simplify each pair of operands until nothing changes. */
4409 while (1)
4410 {
4411 /* Insertion sort is good enough for a small array. */
4412 for (i = 1; i < n_ops; i++)
4413 {
4414 struct simplify_plus_minus_op_data save;
4415 int cmp;
4416
4417 j = i - 1;
4418 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4419 if (cmp <= 0)
4420 continue;
4421 /* Just swapping registers doesn't count as canonicalization. */
4422 if (cmp != 1)
4423 canonicalized = 1;
4424
4425 save = ops[i];
4426 do
4427 ops[j + 1] = ops[j];
4428 while (j--
4429 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4430 ops[j + 1] = save;
4431 }
4432
4433 changed = 0;
4434 for (i = n_ops - 1; i > 0; i--)
4435 for (j = i - 1; j >= 0; j--)
4436 {
4437 rtx lhs = ops[j].op, rhs = ops[i].op;
4438 int lneg = ops[j].neg, rneg = ops[i].neg;
4439
4440 if (lhs != 0 && rhs != 0)
4441 {
4442 enum rtx_code ncode = PLUS;
4443
4444 if (lneg != rneg)
4445 {
4446 ncode = MINUS;
4447 if (lneg)
4448 std::swap (lhs, rhs);
4449 }
4450 else if (swap_commutative_operands_p (lhs, rhs))
4451 std::swap (lhs, rhs);
4452
4453 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4454 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4455 {
4456 rtx tem_lhs, tem_rhs;
4457
4458 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4459 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4460 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4461 tem_rhs);
4462
4463 if (tem && !CONSTANT_P (tem))
4464 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4465 }
4466 else
4467 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4468
4469 if (tem)
4470 {
4471 /* Reject "simplifications" that just wrap the two
4472 arguments in a CONST. Failure to do so can result
4473 in infinite recursion with simplify_binary_operation
4474 when it calls us to simplify CONST operations.
4475 Also, if we find such a simplification, don't try
4476 any more combinations with this rhs: We must have
4477 something like symbol+offset, i.e. one of the
4478 trivial CONST expressions we handle later. */
4479 if (GET_CODE (tem) == CONST
4480 && GET_CODE (XEXP (tem, 0)) == ncode
4481 && XEXP (XEXP (tem, 0), 0) == lhs
4482 && XEXP (XEXP (tem, 0), 1) == rhs)
4483 break;
4484 lneg &= rneg;
4485 if (GET_CODE (tem) == NEG)
4486 tem = XEXP (tem, 0), lneg = !lneg;
4487 if (CONST_INT_P (tem) && lneg)
4488 tem = neg_const_int (mode, tem), lneg = 0;
4489
4490 ops[i].op = tem;
4491 ops[i].neg = lneg;
4492 ops[j].op = NULL_RTX;
4493 changed = 1;
4494 canonicalized = 1;
4495 }
4496 }
4497 }
4498
4499 if (!changed)
4500 break;
4501
4502 /* Pack all the operands to the lower-numbered entries. */
4503 for (i = 0, j = 0; j < n_ops; j++)
4504 if (ops[j].op)
4505 {
4506 ops[i] = ops[j];
4507 i++;
4508 }
4509 n_ops = i;
4510 }
4511
4512 /* If nothing changed, check whether rematerialization of rtl instructions
4513 is still required. */
4514 if (!canonicalized)
4515 {
4516 /* Perform rematerialization only if all operands are registers and
4517 all operations are PLUS. */
4518 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4519 around rs6000 and how it uses the CA register. See PR67145. */
4520 for (i = 0; i < n_ops; i++)
4521 if (ops[i].neg
4522 || !REG_P (ops[i].op)
4523 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4524 && fixed_regs[REGNO (ops[i].op)]
4525 && !global_regs[REGNO (ops[i].op)]
4526 && ops[i].op != frame_pointer_rtx
4527 && ops[i].op != arg_pointer_rtx
4528 && ops[i].op != stack_pointer_rtx))
4529 return NULL_RTX;
4530 goto gen_result;
4531 }
4532
4533 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4534 if (n_ops == 2
4535 && CONST_INT_P (ops[1].op)
4536 && CONSTANT_P (ops[0].op)
4537 && ops[0].neg)
4538 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4539
4540 /* We suppressed creation of trivial CONST expressions in the
4541 combination loop to avoid recursion. Create one manually now.
4542 The combination loop should have ensured that there is exactly
4543 one CONST_INT, and the sort will have ensured that it is last
4544 in the array and that any other constant will be next-to-last. */
4545
4546 if (n_ops > 1
4547 && CONST_INT_P (ops[n_ops - 1].op)
4548 && CONSTANT_P (ops[n_ops - 2].op))
4549 {
4550 rtx value = ops[n_ops - 1].op;
4551 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4552 value = neg_const_int (mode, value);
4553 if (CONST_INT_P (value))
4554 {
4555 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4556 INTVAL (value));
4557 n_ops--;
4558 }
4559 }
4560
4561 /* Put a non-negated operand first, if possible. */
4562
4563 for (i = 0; i < n_ops && ops[i].neg; i++)
4564 continue;
4565 if (i == n_ops)
4566 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4567 else if (i != 0)
4568 {
4569 tem = ops[0].op;
4570 ops[0] = ops[i];
4571 ops[i].op = tem;
4572 ops[i].neg = 1;
4573 }
4574
4575 /* Now make the result by performing the requested operations. */
4576 gen_result:
4577 result = ops[0].op;
4578 for (i = 1; i < n_ops; i++)
4579 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4580 mode, result, ops[i].op);
4581
4582 return result;
4583 }
4584
4585 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4586 static bool
4587 plus_minus_operand_p (const_rtx x)
4588 {
4589 return GET_CODE (x) == PLUS
4590 || GET_CODE (x) == MINUS
4591 || (GET_CODE (x) == CONST
4592 && GET_CODE (XEXP (x, 0)) == PLUS
4593 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4594 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4595 }
4596
4597 /* Like simplify_binary_operation except used for relational operators.
4598 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4599 not both be VOIDmode.
4600
4601 CMP_MODE specifies the mode in which the comparison is done, so it is
4602 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4603 the operands or, if both are VOIDmode, the operands are compared in
4604 "infinite precision". */
4605 rtx
4606 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4607 machine_mode cmp_mode, rtx op0, rtx op1)
4608 {
4609 rtx tem, trueop0, trueop1;
4610
4611 if (cmp_mode == VOIDmode)
4612 cmp_mode = GET_MODE (op0);
4613 if (cmp_mode == VOIDmode)
4614 cmp_mode = GET_MODE (op1);
4615
4616 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4617 if (tem)
4618 {
4619 if (SCALAR_FLOAT_MODE_P (mode))
4620 {
4621 if (tem == const0_rtx)
4622 return CONST0_RTX (mode);
4623 #ifdef FLOAT_STORE_FLAG_VALUE
4624 {
4625 REAL_VALUE_TYPE val;
4626 val = FLOAT_STORE_FLAG_VALUE (mode);
4627 return const_double_from_real_value (val, mode);
4628 }
4629 #else
4630 return NULL_RTX;
4631 #endif
4632 }
4633 if (VECTOR_MODE_P (mode))
4634 {
4635 if (tem == const0_rtx)
4636 return CONST0_RTX (mode);
4637 #ifdef VECTOR_STORE_FLAG_VALUE
4638 {
4639 int i, units;
4640 rtvec v;
4641
4642 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4643 if (val == NULL_RTX)
4644 return NULL_RTX;
4645 if (val == const1_rtx)
4646 return CONST1_RTX (mode);
4647
4648 units = GET_MODE_NUNITS (mode);
4649 v = rtvec_alloc (units);
4650 for (i = 0; i < units; i++)
4651 RTVEC_ELT (v, i) = val;
4652 return gen_rtx_raw_CONST_VECTOR (mode, v);
4653 }
4654 #else
4655 return NULL_RTX;
4656 #endif
4657 }
4658
4659 return tem;
4660 }
4661
4662 /* For the following tests, ensure const0_rtx is op1. */
4663 if (swap_commutative_operands_p (op0, op1)
4664 || (op0 == const0_rtx && op1 != const0_rtx))
4665 std::swap (op0, op1), code = swap_condition (code);
4666
4667 /* If op0 is a compare, extract the comparison arguments from it. */
4668 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4669 return simplify_gen_relational (code, mode, VOIDmode,
4670 XEXP (op0, 0), XEXP (op0, 1));
4671
4672 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4673 || CC0_P (op0))
4674 return NULL_RTX;
4675
4676 trueop0 = avoid_constant_pool_reference (op0);
4677 trueop1 = avoid_constant_pool_reference (op1);
4678 return simplify_relational_operation_1 (code, mode, cmp_mode,
4679 trueop0, trueop1);
4680 }
4681
4682 /* This part of simplify_relational_operation is only used when CMP_MODE
4683 is not in class MODE_CC (i.e. it is a real comparison).
4684
4685 MODE is the mode of the result, while CMP_MODE specifies the mode in
4686 which the comparison is done, so it is the mode of the operands. */
4687
4688 static rtx
4689 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4690 machine_mode cmp_mode, rtx op0, rtx op1)
4691 {
4692 enum rtx_code op0code = GET_CODE (op0);
4693
4694 if (op1 == const0_rtx && COMPARISON_P (op0))
4695 {
4696 /* If op0 is a comparison, extract the comparison arguments
4697 from it. */
4698 if (code == NE)
4699 {
4700 if (GET_MODE (op0) == mode)
4701 return simplify_rtx (op0);
4702 else
4703 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4704 XEXP (op0, 0), XEXP (op0, 1));
4705 }
4706 else if (code == EQ)
4707 {
4708 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4709 if (new_code != UNKNOWN)
4710 return simplify_gen_relational (new_code, mode, VOIDmode,
4711 XEXP (op0, 0), XEXP (op0, 1));
4712 }
4713 }
4714
4715 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4716 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
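/* For instance, in a 32-bit mode (ltu (plus X (const_int 4))
   (const_int 4)) tests whether the addition wrapped around, which is
   equivalent to (geu X (const_int -4)).  (Illustrative example only.)  */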
4717 if ((code == LTU || code == GEU)
4718 && GET_CODE (op0) == PLUS
4719 && CONST_INT_P (XEXP (op0, 1))
4720 && (rtx_equal_p (op1, XEXP (op0, 0))
4721 || rtx_equal_p (op1, XEXP (op0, 1)))
4722 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4723 && XEXP (op0, 1) != const0_rtx)
4724 {
4725 rtx new_cmp
4726 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4727 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4728 cmp_mode, XEXP (op0, 0), new_cmp);
4729 }
4730
4731 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4732 transformed into (LTU a -C). */
4733 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4734 && CONST_INT_P (XEXP (op0, 1))
4735 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4736 && XEXP (op0, 1) != const0_rtx)
4737 {
4738 rtx new_cmp
4739 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4740 return simplify_gen_relational (LTU, mode, cmp_mode,
4741 XEXP (op0, 0), new_cmp);
4742 }
4743
4744 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4745 if ((code == LTU || code == GEU)
4746 && GET_CODE (op0) == PLUS
4747 && rtx_equal_p (op1, XEXP (op0, 1))
4748 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4749 && !rtx_equal_p (op1, XEXP (op0, 0)))
4750 return simplify_gen_relational (code, mode, cmp_mode, op0,
4751 copy_rtx (XEXP (op0, 0)));
4752
4753 if (op1 == const0_rtx)
4754 {
4755 /* Canonicalize (GTU x 0) as (NE x 0). */
4756 if (code == GTU)
4757 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4758 /* Canonicalize (LEU x 0) as (EQ x 0). */
4759 if (code == LEU)
4760 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4761 }
4762 else if (op1 == const1_rtx)
4763 {
4764 switch (code)
4765 {
4766 case GE:
4767 /* Canonicalize (GE x 1) as (GT x 0). */
4768 return simplify_gen_relational (GT, mode, cmp_mode,
4769 op0, const0_rtx);
4770 case GEU:
4771 /* Canonicalize (GEU x 1) as (NE x 0). */
4772 return simplify_gen_relational (NE, mode, cmp_mode,
4773 op0, const0_rtx);
4774 case LT:
4775 /* Canonicalize (LT x 1) as (LE x 0). */
4776 return simplify_gen_relational (LE, mode, cmp_mode,
4777 op0, const0_rtx);
4778 case LTU:
4779 /* Canonicalize (LTU x 1) as (EQ x 0). */
4780 return simplify_gen_relational (EQ, mode, cmp_mode,
4781 op0, const0_rtx);
4782 default:
4783 break;
4784 }
4785 }
4786 else if (op1 == constm1_rtx)
4787 {
4788 /* Canonicalize (LE x -1) as (LT x 0). */
4789 if (code == LE)
4790 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4791 /* Canonicalize (GT x -1) as (GE x 0). */
4792 if (code == GT)
4793 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4794 }
4795
4796 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4797 if ((code == EQ || code == NE)
4798 && (op0code == PLUS || op0code == MINUS)
4799 && CONSTANT_P (op1)
4800 && CONSTANT_P (XEXP (op0, 1))
4801 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4802 {
4803 rtx x = XEXP (op0, 0);
4804 rtx c = XEXP (op0, 1);
4805 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4806 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4807
4808 /* Detect an infinite recursive condition, where we oscillate at this
4809 simplification case between:
4810 A + B == C <---> C - B == A,
4811 where A, B, and C are all constants with non-simplifiable expressions,
4812 usually SYMBOL_REFs. */
4813 if (GET_CODE (tem) == invcode
4814 && CONSTANT_P (x)
4815 && rtx_equal_p (c, XEXP (tem, 1)))
4816 return NULL_RTX;
4817
4818 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4819 }
4820
4821 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4822 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4823 if (code == NE
4824 && op1 == const0_rtx
4825 && GET_MODE_CLASS (mode) == MODE_INT
4826 && cmp_mode != VOIDmode
4827 /* ??? Work-around BImode bugs in the ia64 backend. */
4828 && mode != BImode
4829 && cmp_mode != BImode
4830 && nonzero_bits (op0, cmp_mode) == 1
4831 && STORE_FLAG_VALUE == 1)
4832 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4833 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4834 : lowpart_subreg (mode, op0, cmp_mode);
4835
4836 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4837 if ((code == EQ || code == NE)
4838 && op1 == const0_rtx
4839 && op0code == XOR)
4840 return simplify_gen_relational (code, mode, cmp_mode,
4841 XEXP (op0, 0), XEXP (op0, 1));
4842
4843 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4844 if ((code == EQ || code == NE)
4845 && op0code == XOR
4846 && rtx_equal_p (XEXP (op0, 0), op1)
4847 && !side_effects_p (XEXP (op0, 0)))
4848 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4849 CONST0_RTX (mode));
4850
4851 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4852 if ((code == EQ || code == NE)
4853 && op0code == XOR
4854 && rtx_equal_p (XEXP (op0, 1), op1)
4855 && !side_effects_p (XEXP (op0, 1)))
4856 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4857 CONST0_RTX (mode));
4858
4859 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4860 if ((code == EQ || code == NE)
4861 && op0code == XOR
4862 && CONST_SCALAR_INT_P (op1)
4863 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4864 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4865 simplify_gen_binary (XOR, cmp_mode,
4866 XEXP (op0, 1), op1));
4867
4868 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4869 can be implemented with a BICS instruction on some targets, or
4870 constant-folded if y is a constant. */
4871 if ((code == EQ || code == NE)
4872 && op0code == AND
4873 && rtx_equal_p (XEXP (op0, 0), op1)
4874 && !side_effects_p (op1)
4875 && op1 != CONST0_RTX (cmp_mode))
4876 {
4877 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4878 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4879
4880 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4881 CONST0_RTX (cmp_mode));
4882 }
4883
4884 /* Likewise for (eq/ne (and x y) y). */
4885 if ((code == EQ || code == NE)
4886 && op0code == AND
4887 && rtx_equal_p (XEXP (op0, 1), op1)
4888 && !side_effects_p (op1)
4889 && op1 != CONST0_RTX (cmp_mode))
4890 {
4891 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4892 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4893
4894 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4895 CONST0_RTX (cmp_mode));
4896 }
4897
4898 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4899 if ((code == EQ || code == NE)
4900 && GET_CODE (op0) == BSWAP
4901 && CONST_SCALAR_INT_P (op1))
4902 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4903 simplify_gen_unary (BSWAP, cmp_mode,
4904 op1, cmp_mode));
4905
4906 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4907 if ((code == EQ || code == NE)
4908 && GET_CODE (op0) == BSWAP
4909 && GET_CODE (op1) == BSWAP)
4910 return simplify_gen_relational (code, mode, cmp_mode,
4911 XEXP (op0, 0), XEXP (op1, 0));
4912
4913 if (op0code == POPCOUNT && op1 == const0_rtx)
4914 switch (code)
4915 {
4916 case EQ:
4917 case LE:
4918 case LEU:
4919 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4920 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4921 XEXP (op0, 0), const0_rtx);
4922
4923 case NE:
4924 case GT:
4925 case GTU:
4926 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4927 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4928 XEXP (op0, 0), const0_rtx);
4929
4930 default:
4931 break;
4932 }
4933
4934 return NULL_RTX;
4935 }
4936
4937 enum
4938 {
4939 CMP_EQ = 1,
4940 CMP_LT = 2,
4941 CMP_GT = 4,
4942 CMP_LTU = 8,
4943 CMP_GTU = 16
4944 };
4945
4946
4947 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4948 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4949 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4950 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4951 For floating-point comparisons, assume that the operands were ordered. */
4952
4953 static rtx
4954 comparison_result (enum rtx_code code, int known_results)
4955 {
4956 switch (code)
4957 {
4958 case EQ:
4959 case UNEQ:
4960 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4961 case NE:
4962 case LTGT:
4963 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4964
4965 case LT:
4966 case UNLT:
4967 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4968 case GE:
4969 case UNGE:
4970 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4971
4972 case GT:
4973 case UNGT:
4974 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4975 case LE:
4976 case UNLE:
4977 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4978
4979 case LTU:
4980 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4981 case GEU:
4982 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4983
4984 case GTU:
4985 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4986 case LEU:
4987 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4988
4989 case ORDERED:
4990 return const_true_rtx;
4991 case UNORDERED:
4992 return const0_rtx;
4993 default:
4994 gcc_unreachable ();
4995 }
4996 }
4997
4998 /* Check if the given comparison (done in the given MODE) is actually
4999 a tautology or a contradiction. If the mode is VOIDmode, the
5000 comparison is done in "infinite precision". If no simplification
5001 is possible, this function returns zero. Otherwise, it returns
5002 either const_true_rtx or const0_rtx. */
5003
5004 rtx
5005 simplify_const_relational_operation (enum rtx_code code,
5006 machine_mode mode,
5007 rtx op0, rtx op1)
5008 {
5009 rtx tem;
5010 rtx trueop0;
5011 rtx trueop1;
5012
5013 gcc_assert (mode != VOIDmode
5014 || (GET_MODE (op0) == VOIDmode
5015 && GET_MODE (op1) == VOIDmode));
5016
5017 /* If op0 is a compare, extract the comparison arguments from it. */
5018 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5019 {
5020 op1 = XEXP (op0, 1);
5021 op0 = XEXP (op0, 0);
5022
5023 if (GET_MODE (op0) != VOIDmode)
5024 mode = GET_MODE (op0);
5025 else if (GET_MODE (op1) != VOIDmode)
5026 mode = GET_MODE (op1);
5027 else
5028 return 0;
5029 }
5030
5031 /* We can't simplify MODE_CC values since we don't know what the
5032 actual comparison is. */
5033 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5034 return 0;
5035
5036 /* Make sure the constant is second. */
5037 if (swap_commutative_operands_p (op0, op1))
5038 {
5039 std::swap (op0, op1);
5040 code = swap_condition (code);
5041 }
5042
5043 trueop0 = avoid_constant_pool_reference (op0);
5044 trueop1 = avoid_constant_pool_reference (op1);
5045
5046 /* For integer comparisons of A and B maybe we can simplify A - B and can
5047 then simplify a comparison of that with zero. If A and B are both either
5048 a register or a CONST_INT, this can't help; testing for these cases will
5049 prevent infinite recursion here and speed things up.
5050
5051 We can only do this for EQ and NE comparisons as otherwise we may
5052 lose or introduce overflow which we cannot disregard as undefined as
5053 we do not know the signedness of the operation on either the left or
5054 the right hand side of the comparison. */
5055
5056 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5057 && (code == EQ || code == NE)
5058 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5059 && (REG_P (op1) || CONST_INT_P (trueop1)))
5060 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5061 /* We cannot do this if tem is a nonzero address. */
5062 && ! nonzero_address_p (tem))
5063 return simplify_const_relational_operation (signed_condition (code),
5064 mode, tem, const0_rtx);
5065
5066 if (! HONOR_NANS (mode) && code == ORDERED)
5067 return const_true_rtx;
5068
5069 if (! HONOR_NANS (mode) && code == UNORDERED)
5070 return const0_rtx;
5071
5072 /* For modes without NaNs, if the two operands are equal, we know the
5073 result except if they have side-effects. Even with NaNs we know
5074 the result of unordered comparisons and, if signaling NaNs are
5075 irrelevant, also the result of LT/GT/LTGT. */
5076 if ((! HONOR_NANS (trueop0)
5077 || code == UNEQ || code == UNLE || code == UNGE
5078 || ((code == LT || code == GT || code == LTGT)
5079 && ! HONOR_SNANS (trueop0)))
5080 && rtx_equal_p (trueop0, trueop1)
5081 && ! side_effects_p (trueop0))
5082 return comparison_result (code, CMP_EQ);
5083
5084 /* If the operands are floating-point constants, see if we can fold
5085 the result. */
5086 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5087 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5088 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5089 {
5090 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5091 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5092
5093 /* Comparisons are unordered iff at least one of the values is NaN. */
5094 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5095 switch (code)
5096 {
5097 case UNEQ:
5098 case UNLT:
5099 case UNGT:
5100 case UNLE:
5101 case UNGE:
5102 case NE:
5103 case UNORDERED:
5104 return const_true_rtx;
5105 case EQ:
5106 case LT:
5107 case GT:
5108 case LE:
5109 case GE:
5110 case LTGT:
5111 case ORDERED:
5112 return const0_rtx;
5113 default:
5114 return 0;
5115 }
5116
5117 return comparison_result (code,
5118 (real_equal (d0, d1) ? CMP_EQ :
5119 real_less (d0, d1) ? CMP_LT : CMP_GT));
5120 }
5121
5122 /* Otherwise, see if the operands are both integers. */
5123 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5124 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5125 {
5126 /* It would be nice if we really had a mode here. However, the
5127 largest int representable on the target is as good as
5128 infinite. */
5129 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5130 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5131 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5132
5133 if (wi::eq_p (ptrueop0, ptrueop1))
5134 return comparison_result (code, CMP_EQ);
5135 else
5136 {
5137 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5138 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5139 return comparison_result (code, cr);
5140 }
5141 }
5142
5143 /* Optimize comparisons with upper and lower bounds. */
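/* For instance, if nonzero_bits shows that TRUEOP0 fits in 8 bits, its
   range is [0, 255], so (gtu trueop0 255) folds to const0_rtx and
   (leu trueop0 255) to const_true_rtx.  (Illustrative; when the sign
   bit may be set, get_mode_bounds supplies the range instead.)  */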
5144 if (HWI_COMPUTABLE_MODE_P (mode)
5145 && CONST_INT_P (trueop1)
5146 && !side_effects_p (trueop0))
5147 {
5148 int sign;
5149 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5150 HOST_WIDE_INT val = INTVAL (trueop1);
5151 HOST_WIDE_INT mmin, mmax;
5152
5153 if (code == GEU
5154 || code == LEU
5155 || code == GTU
5156 || code == LTU)
5157 sign = 0;
5158 else
5159 sign = 1;
5160
5161 /* Get a reduced range if the sign bit is zero. */
5162 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5163 {
5164 mmin = 0;
5165 mmax = nonzero;
5166 }
5167 else
5168 {
5169 rtx mmin_rtx, mmax_rtx;
5170 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5171
5172 mmin = INTVAL (mmin_rtx);
5173 mmax = INTVAL (mmax_rtx);
5174 if (sign)
5175 {
5176 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5177
5178 mmin >>= (sign_copies - 1);
5179 mmax >>= (sign_copies - 1);
5180 }
5181 }
5182
5183 switch (code)
5184 {
5185 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5186 case GEU:
5187 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5188 return const_true_rtx;
5189 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5190 return const0_rtx;
5191 break;
5192 case GE:
5193 if (val <= mmin)
5194 return const_true_rtx;
5195 if (val > mmax)
5196 return const0_rtx;
5197 break;
5198
5199 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5200 case LEU:
5201 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5202 return const_true_rtx;
5203 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5204 return const0_rtx;
5205 break;
5206 case LE:
5207 if (val >= mmax)
5208 return const_true_rtx;
5209 if (val < mmin)
5210 return const0_rtx;
5211 break;
5212
5213 case EQ:
5214 /* x == y is always false for y out of range. */
5215 if (val < mmin || val > mmax)
5216 return const0_rtx;
5217 break;
5218
5219 /* x > y is always false for y >= mmax, always true for y < mmin. */
5220 case GTU:
5221 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5222 return const0_rtx;
5223 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5224 return const_true_rtx;
5225 break;
5226 case GT:
5227 if (val >= mmax)
5228 return const0_rtx;
5229 if (val < mmin)
5230 return const_true_rtx;
5231 break;
5232
5233 /* x < y is always false for y <= mmin, always true for y > mmax. */
5234 case LTU:
5235 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5236 return const0_rtx;
5237 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5238 return const_true_rtx;
5239 break;
5240 case LT:
5241 if (val <= mmin)
5242 return const0_rtx;
5243 if (val > mmax)
5244 return const_true_rtx;
5245 break;
5246
5247 case NE:
5248 /* x != y is always true for y out of range. */
5249 if (val < mmin || val > mmax)
5250 return const_true_rtx;
5251 break;
5252
5253 default:
5254 break;
5255 }
5256 }
5257
5258 /* Optimize integer comparisons with zero. */
5259 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5260 {
5261 /* Some addresses are known to be nonzero. We don't know
5262 their sign, but equality comparisons are known. */
5263 if (nonzero_address_p (trueop0))
5264 {
5265 if (code == EQ || code == LEU)
5266 return const0_rtx;
5267 if (code == NE || code == GTU)
5268 return const_true_rtx;
5269 }
5270
5271 /* See if the first operand is an IOR with a constant. If so, we
5272 may be able to determine the result of this comparison. */
5273 if (GET_CODE (op0) == IOR)
5274 {
5275 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5276 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5277 {
5278 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5279 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5280 && (UINTVAL (inner_const)
5281 & (HOST_WIDE_INT_1U
5282 << sign_bitnum)));
5283
5284 switch (code)
5285 {
5286 case EQ:
5287 case LEU:
5288 return const0_rtx;
5289 case NE:
5290 case GTU:
5291 return const_true_rtx;
5292 case LT:
5293 case LE:
5294 if (has_sign)
5295 return const_true_rtx;
5296 break;
5297 case GT:
5298 case GE:
5299 if (has_sign)
5300 return const0_rtx;
5301 break;
5302 default:
5303 break;
5304 }
5305 }
5306 }
5307 }
5308
5309 /* Optimize comparison of ABS with zero. */
5310 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5311 && (GET_CODE (trueop0) == ABS
5312 || (GET_CODE (trueop0) == FLOAT_EXTEND
5313 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5314 {
5315 switch (code)
5316 {
5317 case LT:
5318 /* Optimize abs(x) < 0.0. */
5319 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5320 return const0_rtx;
5321 break;
5322
5323 case GE:
5324 /* Optimize abs(x) >= 0.0. */
5325 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5326 return const_true_rtx;
5327 break;
5328
5329 case UNGE:
5330 /* Optimize ! (abs(x) < 0.0). */
5331 return const_true_rtx;
5332
5333 default:
5334 break;
5335 }
5336 }
5337
5338 return 0;
5339 }
5340
5341 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5342 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5343 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5344 expression can be simplified to that, or NULL_RTX if not.
5345 Assume X is compared against zero with CMP_CODE, with the true
5346 arm being TRUE_VAL and the false arm being FALSE_VAL. */
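/* For instance, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode, (eq X 0) ? 32 : (clz:SI X) collapses to (clz:SI X), since
   the IF_THEN_ELSE merely spells out CLZ's defined value at zero.
   (Illustrative example only.)  */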
5347
5348 static rtx
5349 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5350 {
5351 if (cmp_code != EQ && cmp_code != NE)
5352 return NULL_RTX;
5353
5354 /* Result on X == 0 and X != 0 respectively. */
5355 rtx on_zero, on_nonzero;
5356 if (cmp_code == EQ)
5357 {
5358 on_zero = true_val;
5359 on_nonzero = false_val;
5360 }
5361 else
5362 {
5363 on_zero = false_val;
5364 on_nonzero = true_val;
5365 }
5366
5367 rtx_code op_code = GET_CODE (on_nonzero);
5368 if ((op_code != CLZ && op_code != CTZ)
5369 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5370 || !CONST_INT_P (on_zero))
5371 return NULL_RTX;
5372
5373 HOST_WIDE_INT op_val;
5374 if (((op_code == CLZ
5375 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5376 || (op_code == CTZ
5377 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5378 && op_val == INTVAL (on_zero))
5379 return on_nonzero;
5380
5381 return NULL_RTX;
5382 }
5383
5384 \f
5385 /* Simplify CODE, an operation with result mode MODE and three operands,
5386 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5387 a constant. Return 0 if no simplification is possible. */
5388
5389 rtx
5390 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5391 machine_mode op0_mode, rtx op0, rtx op1,
5392 rtx op2)
5393 {
5394 unsigned int width = GET_MODE_PRECISION (mode);
5395 bool any_change = false;
5396 rtx tem, trueop2;
5397
5398 /* VOIDmode means "infinite" precision. */
5399 if (width == 0)
5400 width = HOST_BITS_PER_WIDE_INT;
5401
5402 switch (code)
5403 {
5404 case FMA:
5405 /* Simplify negations around the multiplication. */
5406 /* -a * -b + c => a * b + c. */
5407 if (GET_CODE (op0) == NEG)
5408 {
5409 tem = simplify_unary_operation (NEG, mode, op1, mode);
5410 if (tem)
5411 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5412 }
5413 else if (GET_CODE (op1) == NEG)
5414 {
5415 tem = simplify_unary_operation (NEG, mode, op0, mode);
5416 if (tem)
5417 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5418 }
5419
5420 /* Canonicalize the two multiplication operands. */
5421 /* a * -b + c => -b * a + c. */
5422 if (swap_commutative_operands_p (op0, op1))
5423 std::swap (op0, op1), any_change = true;
5424
5425 if (any_change)
5426 return gen_rtx_FMA (mode, op0, op1, op2);
5427 return NULL_RTX;
5428
5429 case SIGN_EXTRACT:
5430 case ZERO_EXTRACT:
5431 if (CONST_INT_P (op0)
5432 && CONST_INT_P (op1)
5433 && CONST_INT_P (op2)
5434 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5435 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5436 {
5437 /* Extracting a bit-field from a constant. */
5438 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5439 HOST_WIDE_INT op1val = INTVAL (op1);
5440 HOST_WIDE_INT op2val = INTVAL (op2);
5441 if (BITS_BIG_ENDIAN)
5442 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5443 else
5444 val >>= op2val;
5445
5446 if (HOST_BITS_PER_WIDE_INT != op1val)
5447 {
5448 /* First zero-extend. */
5449 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5450 /* If desired, propagate sign bit. */
5451 if (code == SIGN_EXTRACT
5452 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5453 != 0)
5454 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5455 }
5456
5457 return gen_int_mode (val, mode);
5458 }
5459 break;
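/* A worked example, assuming !BITS_BIG_ENDIAN: for
   (zero_extract (const_int 0x12f4) (const_int 4) (const_int 4)) the
   value is shifted right by OP2 and masked to OP1 bits, yielding
   (const_int 15); the SIGN_EXTRACT form of the same field sees the
   top bit of the 4-bit field set and therefore sign-extends to
   (const_int -1).  */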
5460
5461 case IF_THEN_ELSE:
5462 if (CONST_INT_P (op0))
5463 return op0 != const0_rtx ? op1 : op2;
5464
5465 /* Convert c ? a : a into "a". */
5466 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5467 return op1;
5468
5469 /* Convert a != b ? a : b into "a". */
5470 if (GET_CODE (op0) == NE
5471 && ! side_effects_p (op0)
5472 && ! HONOR_NANS (mode)
5473 && ! HONOR_SIGNED_ZEROS (mode)
5474 && ((rtx_equal_p (XEXP (op0, 0), op1)
5475 && rtx_equal_p (XEXP (op0, 1), op2))
5476 || (rtx_equal_p (XEXP (op0, 0), op2)
5477 && rtx_equal_p (XEXP (op0, 1), op1))))
5478 return op1;
5479
5480 /* Convert a == b ? a : b into "b". */
5481 if (GET_CODE (op0) == EQ
5482 && ! side_effects_p (op0)
5483 && ! HONOR_NANS (mode)
5484 && ! HONOR_SIGNED_ZEROS (mode)
5485 && ((rtx_equal_p (XEXP (op0, 0), op1)
5486 && rtx_equal_p (XEXP (op0, 1), op2))
5487 || (rtx_equal_p (XEXP (op0, 0), op2)
5488 && rtx_equal_p (XEXP (op0, 1), op1))))
5489 return op2;
5490
5491 /* Convert (!c) != {0,...,0} ? a : b into
5492 c != {0,...,0} ? b : a for vector modes. */
5493 if (VECTOR_MODE_P (GET_MODE (op1))
5494 && GET_CODE (op0) == NE
5495 && GET_CODE (XEXP (op0, 0)) == NOT
5496 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5497 {
5498 rtx cv = XEXP (op0, 1);
5499 int nunits = CONST_VECTOR_NUNITS (cv);
5500 bool ok = true;
5501 for (int i = 0; i < nunits; ++i)
5502 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5503 {
5504 ok = false;
5505 break;
5506 }
5507 if (ok)
5508 {
5509 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5510 XEXP (XEXP (op0, 0), 0),
5511 XEXP (op0, 1));
5512 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5513 return retval;
5514 }
5515 }
5516
5517 /* Convert x == 0 ? N : clz (x) into clz (x) when
5518 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5519 Similarly for ctz (x). */
5520 if (COMPARISON_P (op0) && !side_effects_p (op0)
5521 && XEXP (op0, 1) == const0_rtx)
5522 {
5523 rtx simplified
5524 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5525 op1, op2);
5526 if (simplified)
5527 return simplified;
5528 }
5529
5530 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5531 {
5532 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5533 ? GET_MODE (XEXP (op0, 1))
5534 : GET_MODE (XEXP (op0, 0)));
5535 rtx temp;
5536
5537 /* Look for happy constants in op1 and op2. */
5538 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5539 {
5540 HOST_WIDE_INT t = INTVAL (op1);
5541 HOST_WIDE_INT f = INTVAL (op2);
5542
5543 if (t == STORE_FLAG_VALUE && f == 0)
5544 code = GET_CODE (op0);
5545 else if (t == 0 && f == STORE_FLAG_VALUE)
5546 {
5547 enum rtx_code tmp;
5548 tmp = reversed_comparison_code (op0, NULL);
5549 if (tmp == UNKNOWN)
5550 break;
5551 code = tmp;
5552 }
5553 else
5554 break;
5555
5556 return simplify_gen_relational (code, mode, cmp_mode,
5557 XEXP (op0, 0), XEXP (op0, 1));
5558 }
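/* E.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (lt X Y) (const_int 1) (const_int 0)) is just (lt X Y),
   and with the two constant arms swapped it becomes the reversed
   comparison (ge X Y), provided the reversal is known to be safe.  */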
5559
5560 if (cmp_mode == VOIDmode)
5561 cmp_mode = op0_mode;
5562 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5563 cmp_mode, XEXP (op0, 0),
5564 XEXP (op0, 1));
5565
5566 /* See if any simplifications were possible. */
5567 if (temp)
5568 {
5569 if (CONST_INT_P (temp))
5570 return temp == const0_rtx ? op2 : op1;
5571 else if (temp)
5572 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5573 }
5574 }
5575 break;
5576
5577 case VEC_MERGE:
5578 gcc_assert (GET_MODE (op0) == mode);
5579 gcc_assert (GET_MODE (op1) == mode);
5580 gcc_assert (VECTOR_MODE_P (mode));
5581 trueop2 = avoid_constant_pool_reference (op2);
5582 if (CONST_INT_P (trueop2))
5583 {
5584 int elt_size = GET_MODE_UNIT_SIZE (mode);
5585 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5586 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5587 unsigned HOST_WIDE_INT mask;
5588 if (n_elts == HOST_BITS_PER_WIDE_INT)
5589 mask = -1;
5590 else
5591 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5592
5593 if (!(sel & mask) && !side_effects_p (op0))
5594 return op1;
5595 if ((sel & mask) == mask && !side_effects_p (op1))
5596 return op0;
5597
5598 rtx trueop0 = avoid_constant_pool_reference (op0);
5599 rtx trueop1 = avoid_constant_pool_reference (op1);
5600 if (GET_CODE (trueop0) == CONST_VECTOR
5601 && GET_CODE (trueop1) == CONST_VECTOR)
5602 {
5603 rtvec v = rtvec_alloc (n_elts);
5604 unsigned int i;
5605
5606 for (i = 0; i < n_elts; i++)
5607 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5608 ? CONST_VECTOR_ELT (trueop0, i)
5609 : CONST_VECTOR_ELT (trueop1, i));
5610 return gen_rtx_CONST_VECTOR (mode, v);
5611 }
5612
5613 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5614 if no element from a appears in the result. */
5615 if (GET_CODE (op0) == VEC_MERGE)
5616 {
5617 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5618 if (CONST_INT_P (tem))
5619 {
5620 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5621 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5622 return simplify_gen_ternary (code, mode, mode,
5623 XEXP (op0, 1), op1, op2);
5624 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5625 return simplify_gen_ternary (code, mode, mode,
5626 XEXP (op0, 0), op1, op2);
5627 }
5628 }
5629 if (GET_CODE (op1) == VEC_MERGE)
5630 {
5631 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5632 if (CONST_INT_P (tem))
5633 {
5634 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5635 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5636 return simplify_gen_ternary (code, mode, mode,
5637 op0, XEXP (op1, 1), op2);
5638 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5639 return simplify_gen_ternary (code, mode, mode,
5640 op0, XEXP (op1, 0), op2);
5641 }
5642 }
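/* A small worked example of the selector arithmetic above: with
   four-element vectors, (vec_merge A B (const_int 5)) takes elements
   0 and 2 from A and elements 1 and 3 from B.  If the outer selector
   never uses an element that an inner VEC_MERGE took from one of its
   operands, that operand is dead and the inner merge can be bypassed
   as shown.  */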
5643
5644 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5645 with a. */
5646 if (GET_CODE (op0) == VEC_DUPLICATE
5647 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5648 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5649 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5650 {
5651 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5652 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5653 {
5654 if (XEXP (XEXP (op0, 0), 0) == op1
5655 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5656 return op1;
5657 }
5658 }
5659 }
5660
5661 if (rtx_equal_p (op0, op1)
5662 && !side_effects_p (op2) && !side_effects_p (op1))
5663 return op0;
5664
5665 break;
5666
5667 default:
5668 gcc_unreachable ();
5669 }
5670
5671 return 0;
5672 }
5673
5674 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5675 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5676 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5677
5678 Works by unpacking OP into a collection of 8-bit values
5679 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5680 and then repacking them again for OUTERMODE. */
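/* For instance (a non-normative illustration), taking the SImode lowpart
   of the DImode constant 0x1122334455667788 unpacks the value into eight
   byte-sized chunks, selects the four least significant ones (after BYTE
   has been adjusted for the target's endianness) and repacks them, giving
   the SImode constant 0x55667788.  */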
5681
5682 static rtx
5683 simplify_immed_subreg (machine_mode outermode, rtx op,
5684 machine_mode innermode, unsigned int byte)
5685 {
5686 enum {
5687 value_bit = 8,
5688 value_mask = (1 << value_bit) - 1
5689 };
5690 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5691 int value_start;
5692 int i;
5693 int elem;
5694
5695 int num_elem;
5696 rtx * elems;
5697 int elem_bitsize;
5698 rtx result_s = NULL;
5699 rtvec result_v = NULL;
5700 enum mode_class outer_class;
5701 machine_mode outer_submode;
5702 int max_bitsize;
5703
5704 /* Some ports misuse CCmode. */
5705 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5706 return op;
5707
5708 /* We have no way to represent a complex constant at the rtl level. */
5709 if (COMPLEX_MODE_P (outermode))
5710 return NULL_RTX;
5711
5712 /* We support any size mode. */
5713 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5714 GET_MODE_BITSIZE (innermode));
5715
5716 /* Unpack the value. */
5717
5718 if (GET_CODE (op) == CONST_VECTOR)
5719 {
5720 num_elem = CONST_VECTOR_NUNITS (op);
5721 elems = &CONST_VECTOR_ELT (op, 0);
5722 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5723 }
5724 else
5725 {
5726 num_elem = 1;
5727 elems = &op;
5728 elem_bitsize = max_bitsize;
5729 }
5730 /* If this asserts, it is too complicated; reducing value_bit may help. */
5731 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5732 /* I don't know how to handle endianness of sub-units. */
5733 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5734
5735 for (elem = 0; elem < num_elem; elem++)
5736 {
5737 unsigned char * vp;
5738 rtx el = elems[elem];
5739
5740 /* Vectors are kept in target memory order. (This is probably
5741 a mistake.) */
5742 {
5743 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5744 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5745 / BITS_PER_UNIT);
5746 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5747 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5748 unsigned bytele = (subword_byte % UNITS_PER_WORD
5749 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5750 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5751 }
5752
5753 switch (GET_CODE (el))
5754 {
5755 case CONST_INT:
5756 for (i = 0;
5757 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5758 i += value_bit)
5759 *vp++ = INTVAL (el) >> i;
5760 /* CONST_INTs are always logically sign-extended. */
5761 for (; i < elem_bitsize; i += value_bit)
5762 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5763 break;
5764
5765 case CONST_WIDE_INT:
5766 {
5767 rtx_mode_t val = rtx_mode_t (el, innermode);
5768 unsigned char extend = wi::sign_mask (val);
5769 int prec = wi::get_precision (val);
5770
5771 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5772 *vp++ = wi::extract_uhwi (val, i, value_bit);
5773 for (; i < elem_bitsize; i += value_bit)
5774 *vp++ = extend;
5775 }
5776 break;
5777
5778 case CONST_DOUBLE:
5779 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5780 {
5781 unsigned char extend = 0;
5782 /* If this triggers, someone should have generated a
5783 CONST_INT instead. */
5784 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5785
5786 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5787 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5788 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5789 {
5790 *vp++
5791 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5792 i += value_bit;
5793 }
5794
5795 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5796 extend = -1;
5797 for (; i < elem_bitsize; i += value_bit)
5798 *vp++ = extend;
5799 }
5800 else
5801 {
5802 /* This is big enough for anything on the platform. */
5803 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5804 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5805
5806 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5807 gcc_assert (bitsize <= elem_bitsize);
5808 gcc_assert (bitsize % value_bit == 0);
5809
5810 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5811 GET_MODE (el));
5812
5813 /* real_to_target produces its result in words affected by
5814 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5815 and use WORDS_BIG_ENDIAN instead; see the documentation
5816 of SUBREG in rtl.texi. */
5817 for (i = 0; i < bitsize; i += value_bit)
5818 {
5819 int ibase;
5820 if (WORDS_BIG_ENDIAN)
5821 ibase = bitsize - 1 - i;
5822 else
5823 ibase = i;
5824 *vp++ = tmp[ibase / 32] >> i % 32;
5825 }
5826
5827 /* It shouldn't matter what's done here, so fill it with
5828 zero. */
5829 for (; i < elem_bitsize; i += value_bit)
5830 *vp++ = 0;
5831 }
5832 break;
5833
5834 case CONST_FIXED:
5835 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5836 {
5837 for (i = 0; i < elem_bitsize; i += value_bit)
5838 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5839 }
5840 else
5841 {
5842 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5843 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5844 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5845 i += value_bit)
5846 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5847 >> (i - HOST_BITS_PER_WIDE_INT);
5848 for (; i < elem_bitsize; i += value_bit)
5849 *vp++ = 0;
5850 }
5851 break;
5852
5853 default:
5854 gcc_unreachable ();
5855 }
5856 }
5857
5858 /* Now, pick the right byte to start with. */
5859 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5860 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5861 will already have offset 0. */
5862 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5863 {
5864 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5865 - byte);
5866 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5867 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5868 byte = (subword_byte % UNITS_PER_WORD
5869 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5870 }
5871
5872 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5873 so if the adjustment would have made it negative it will instead be very large.) */
5874 gcc_assert (byte < GET_MODE_SIZE (innermode));
5875
5876 /* Convert from bytes to chunks of size value_bit. */
5877 value_start = byte * (BITS_PER_UNIT / value_bit);
5878
5879 /* Re-pack the value. */
5880 num_elem = GET_MODE_NUNITS (outermode);
5881
5882 if (VECTOR_MODE_P (outermode))
5883 {
5884 result_v = rtvec_alloc (num_elem);
5885 elems = &RTVEC_ELT (result_v, 0);
5886 }
5887 else
5888 elems = &result_s;
5889
5890 outer_submode = GET_MODE_INNER (outermode);
5891 outer_class = GET_MODE_CLASS (outer_submode);
5892 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5893
5894 gcc_assert (elem_bitsize % value_bit == 0);
5895 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5896
5897 for (elem = 0; elem < num_elem; elem++)
5898 {
5899 unsigned char *vp;
5900
5901 /* Vectors are stored in target memory order. (This is probably
5902 a mistake.) */
5903 {
5904 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5905 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5906 / BITS_PER_UNIT);
5907 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5908 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5909 unsigned bytele = (subword_byte % UNITS_PER_WORD
5910 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5911 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5912 }
5913
5914 switch (outer_class)
5915 {
5916 case MODE_INT:
5917 case MODE_PARTIAL_INT:
5918 {
5919 int u;
5920 int base = 0;
5921 int units
5922 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5923 / HOST_BITS_PER_WIDE_INT;
5924 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5925 wide_int r;
5926
5927 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5928 return NULL_RTX;
5929 for (u = 0; u < units; u++)
5930 {
5931 unsigned HOST_WIDE_INT buf = 0;
5932 for (i = 0;
5933 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5934 i += value_bit)
5935 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5936
5937 tmp[u] = buf;
5938 base += HOST_BITS_PER_WIDE_INT;
5939 }
5940 r = wide_int::from_array (tmp, units,
5941 GET_MODE_PRECISION (outer_submode));
5942 #if TARGET_SUPPORTS_WIDE_INT == 0
5943 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5944 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5945 return NULL_RTX;
5946 #endif
5947 elems[elem] = immed_wide_int_const (r, outer_submode);
5948 }
5949 break;
5950
5951 case MODE_FLOAT:
5952 case MODE_DECIMAL_FLOAT:
5953 {
5954 REAL_VALUE_TYPE r;
5955 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5956
5957 /* real_from_target wants its input in words affected by
5958 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5959 and use WORDS_BIG_ENDIAN instead; see the documentation
5960 of SUBREG in rtl.texi. */
5961 for (i = 0; i < elem_bitsize; i += value_bit)
5962 {
5963 int ibase;
5964 if (WORDS_BIG_ENDIAN)
5965 ibase = elem_bitsize - 1 - i;
5966 else
5967 ibase = i;
5968 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5969 }
5970
5971 real_from_target (&r, tmp, outer_submode);
5972 elems[elem] = const_double_from_real_value (r, outer_submode);
5973 }
5974 break;
5975
5976 case MODE_FRACT:
5977 case MODE_UFRACT:
5978 case MODE_ACCUM:
5979 case MODE_UACCUM:
5980 {
5981 FIXED_VALUE_TYPE f;
5982 f.data.low = 0;
5983 f.data.high = 0;
5984 f.mode = outer_submode;
5985
5986 for (i = 0;
5987 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5988 i += value_bit)
5989 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5990 for (; i < elem_bitsize; i += value_bit)
5991 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5992 << (i - HOST_BITS_PER_WIDE_INT));
5993
5994 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5995 }
5996 break;
5997
5998 default:
5999 gcc_unreachable ();
6000 }
6001 }
6002 if (VECTOR_MODE_P (outermode))
6003 return gen_rtx_CONST_VECTOR (outermode, result_v);
6004 else
6005 return result_s;
6006 }
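#if 0
/* Illustration only, not built as part of GCC: the unpack-into-bytes and
   repack idea behind simplify_immed_subreg, reduced to plain integers on a
   little-endian host.  Extracting the 32-bit value at byte offset BYTE
   (assumed to be 0 or 4 here) of a 64-bit constant amounts to re-reading
   four of its eight bytes.  */
#include <stdint.h>
#include <string.h>

static uint32_t
example_immed_subreg (uint64_t op, unsigned int byte)
{
  unsigned char buf[8];
  uint32_t result;
  memcpy (buf, &op, sizeof buf);		/* Unpack into bytes.  */
  memcpy (&result, buf + byte, sizeof result);	/* Repack the selected bytes.  */
  return result;
}
#endif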
6007
6008 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6009 Return 0 if no simplifications are possible. */
6010 rtx
6011 simplify_subreg (machine_mode outermode, rtx op,
6012 machine_mode innermode, unsigned int byte)
6013 {
6014 /* Little bit of sanity checking. */
6015 gcc_assert (innermode != VOIDmode);
6016 gcc_assert (outermode != VOIDmode);
6017 gcc_assert (innermode != BLKmode);
6018 gcc_assert (outermode != BLKmode);
6019
6020 gcc_assert (GET_MODE (op) == innermode
6021 || GET_MODE (op) == VOIDmode);
6022
6023 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6024 return NULL_RTX;
6025
6026 if (byte >= GET_MODE_SIZE (innermode))
6027 return NULL_RTX;
6028
6029 if (outermode == innermode && !byte)
6030 return op;
6031
6032 if (CONST_SCALAR_INT_P (op)
6033 || CONST_DOUBLE_AS_FLOAT_P (op)
6034 || GET_CODE (op) == CONST_FIXED
6035 || GET_CODE (op) == CONST_VECTOR)
6036 return simplify_immed_subreg (outermode, op, innermode, byte);
6037
6038 /* Changing mode twice with SUBREG => just change it once,
6039 or not at all if changing back to the starting mode. */
6040 if (GET_CODE (op) == SUBREG)
6041 {
6042 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6043 int final_offset = byte + SUBREG_BYTE (op);
6044 rtx newx;
6045
6046 if (outermode == innermostmode
6047 && byte == 0 && SUBREG_BYTE (op) == 0)
6048 return SUBREG_REG (op);
6049
6050 /* The SUBREG_BYTE represents the offset, as if the value were stored
6051 in memory. The irritating exception is the paradoxical subreg,
6052 where we define SUBREG_BYTE to be 0; on big endian machines this
6053 value would really be negative. For a moment, undo this exception. */
6054 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6055 {
6056 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6057 if (WORDS_BIG_ENDIAN)
6058 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6059 if (BYTES_BIG_ENDIAN)
6060 final_offset += difference % UNITS_PER_WORD;
6061 }
6062 if (SUBREG_BYTE (op) == 0
6063 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6064 {
6065 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6066 if (WORDS_BIG_ENDIAN)
6067 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6068 if (BYTES_BIG_ENDIAN)
6069 final_offset += difference % UNITS_PER_WORD;
6070 }
6071
6072 /* See whether resulting subreg will be paradoxical. */
6073 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
6074 {
6075 /* In nonparadoxical subregs we can't handle negative offsets. */
6076 if (final_offset < 0)
6077 return NULL_RTX;
6078 /* Bail out in case resulting subreg would be incorrect. */
6079 if (final_offset % GET_MODE_SIZE (outermode)
6080 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6081 return NULL_RTX;
6082 }
6083 else
6084 {
6085 int offset = 0;
6086 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6087
6088 /* In a paradoxical subreg, see if we are still looking at the lower
6089 part. If so, our SUBREG_BYTE will be 0. */
6090 if (WORDS_BIG_ENDIAN)
6091 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6092 if (BYTES_BIG_ENDIAN)
6093 offset += difference % UNITS_PER_WORD;
6094 if (offset == final_offset)
6095 final_offset = 0;
6096 else
6097 return NULL_RTX;
6098 }
6099
6100 /* Recurse for further possible simplifications. */
6101 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6102 final_offset);
6103 if (newx)
6104 return newx;
6105 if (validate_subreg (outermode, innermostmode,
6106 SUBREG_REG (op), final_offset))
6107 {
6108 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6109 if (SUBREG_PROMOTED_VAR_P (op)
6110 && SUBREG_PROMOTED_SIGN (op) >= 0
6111 && GET_MODE_CLASS (outermode) == MODE_INT
6112 && IN_RANGE (GET_MODE_SIZE (outermode),
6113 GET_MODE_SIZE (innermode),
6114 GET_MODE_SIZE (innermostmode))
6115 && subreg_lowpart_p (newx))
6116 {
6117 SUBREG_PROMOTED_VAR_P (newx) = 1;
6118 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6119 }
6120 return newx;
6121 }
6122 return NULL_RTX;
6123 }
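/* As a concrete illustration of the folding above,
   (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
   (subreg:QI (reg:SI R) 0): the two byte offsets are summed and a single
   SUBREG of the innermost register is produced, provided validate_subreg
   accepts the combination.  */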
6124
6125 /* SUBREG of a hard register => just change the register number
6126 and/or mode. If the hard register is not valid in that mode,
6127 suppress this simplification. If the hard register is the stack,
6128 frame, or argument pointer, leave this as a SUBREG. */
6129
6130 if (REG_P (op) && HARD_REGISTER_P (op))
6131 {
6132 unsigned int regno, final_regno;
6133
6134 regno = REGNO (op);
6135 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6136 if (HARD_REGISTER_NUM_P (final_regno))
6137 {
6138 rtx x;
6139 int final_offset = byte;
6140
6141 /* Adjust offset for paradoxical subregs. */
6142 if (byte == 0
6143 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6144 {
6145 int difference = (GET_MODE_SIZE (innermode)
6146 - GET_MODE_SIZE (outermode));
6147 if (WORDS_BIG_ENDIAN)
6148 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6149 if (BYTES_BIG_ENDIAN)
6150 final_offset += difference % UNITS_PER_WORD;
6151 }
6152
6153 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6154
6155 /* Propagate original regno. We don't have any way to specify
6156 the offset inside original regno, so do so only for lowpart.
6157 The information is used only by alias analysis, which cannot
6158 grok a partial register anyway. */
6159
6160 if (subreg_lowpart_offset (outermode, innermode) == byte)
6161 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6162 return x;
6163 }
6164 }
6165
6166 /* If we have a SUBREG of a register that we are replacing and we are
6167 replacing it with a MEM, make a new MEM and try replacing the
6168 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6169 or if we would be widening it. */
6170
6171 if (MEM_P (op)
6172 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6173 /* Allow splitting of volatile memory references in case we don't
6174 have instruction to move the whole thing. */
6175 && (! MEM_VOLATILE_P (op)
6176 || ! have_insn_for (SET, innermode))
6177 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6178 return adjust_address_nv (op, outermode, byte);
6179
6180 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6181 of two parts. */
6182 if (GET_CODE (op) == CONCAT
6183 || GET_CODE (op) == VEC_CONCAT)
6184 {
6185 unsigned int part_size, final_offset;
6186 rtx part, res;
6187
6188 enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
6189 if (part_mode == VOIDmode)
6190 part_mode = GET_MODE_INNER (GET_MODE (op));
6191 part_size = GET_MODE_SIZE (part_mode);
6192 if (byte < part_size)
6193 {
6194 part = XEXP (op, 0);
6195 final_offset = byte;
6196 }
6197 else
6198 {
6199 part = XEXP (op, 1);
6200 final_offset = byte - part_size;
6201 }
6202
6203 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6204 return NULL_RTX;
6205
6206 part_mode = GET_MODE (part);
6207 if (part_mode == VOIDmode)
6208 part_mode = GET_MODE_INNER (GET_MODE (op));
6209 res = simplify_subreg (outermode, part, part_mode, final_offset);
6210 if (res)
6211 return res;
6212 if (validate_subreg (outermode, part_mode, part, final_offset))
6213 return gen_rtx_SUBREG (outermode, part, final_offset);
6214 return NULL_RTX;
6215 }
6216
6217 /* A SUBREG resulting from a zero extension may fold to zero if
6218 it extracts higher bits than the ZERO_EXTEND's source provides. */
6219 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6220 {
6221 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6222 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6223 return CONST0_RTX (outermode);
6224 }
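/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) selects only bits that the
   ZERO_EXTEND is known to have cleared, so it folds to (const_int 0).  */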
6225
6226 if (SCALAR_INT_MODE_P (outermode)
6227 && SCALAR_INT_MODE_P (innermode)
6228 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6229 && byte == subreg_lowpart_offset (outermode, innermode))
6230 {
6231 rtx tem = simplify_truncation (outermode, op, innermode);
6232 if (tem)
6233 return tem;
6234 }
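/* For instance (illustrative only), the lowpart QImode SUBREG of
   (zero_extend:SI (reg:QI X)) is handled here as a truncation of the
   extension and simplifies back to (reg:QI X).  */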
6235
6236 return NULL_RTX;
6237 }
6238
6239 /* Make a SUBREG operation or equivalent if it folds. */
6240
6241 rtx
6242 simplify_gen_subreg (machine_mode outermode, rtx op,
6243 machine_mode innermode, unsigned int byte)
6244 {
6245 rtx newx;
6246
6247 newx = simplify_subreg (outermode, op, innermode, byte);
6248 if (newx)
6249 return newx;
6250
6251 if (GET_CODE (op) == SUBREG
6252 || GET_CODE (op) == CONCAT
6253 || GET_MODE (op) == VOIDmode)
6254 return NULL_RTX;
6255
6256 if (validate_subreg (outermode, innermode, op, byte))
6257 return gen_rtx_SUBREG (outermode, op, byte);
6258
6259 return NULL_RTX;
6260 }
6261
6262 /* Generate a subreg to get the least significant part of EXPR (in mode
6263 INNER_MODE) in mode OUTER_MODE. */
6264
6265 rtx
6266 lowpart_subreg (machine_mode outer_mode, rtx expr,
6267 machine_mode inner_mode)
6268 {
6269 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6270 subreg_lowpart_offset (outer_mode, inner_mode));
6271 }
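/* For example (illustrative only), lowpart_subreg (SImode, X, DImode) asks
   for byte offset 0 on a little-endian target and byte offset 4 on a
   big-endian one, since subreg_lowpart_offset supplies the endian-correct
   offset of the least significant part.  */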
6272
6273 /* Simplify X, an rtx expression.
6274
6275 Return the simplified expression or NULL if no simplifications
6276 were possible.
6277
6278 This is the preferred entry point into the simplification routines;
6279 however, we still allow passes to call the more specific routines.
6280
6281 Right now GCC has three (yes, three) major bodies of RTL simplification
6282 code that need to be unified.
6283
6284 1. fold_rtx in cse.c. This code uses various CSE specific
6285 information to aid in RTL simplification.
6286
6287 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6288 it uses combine specific information to aid in RTL
6289 simplification.
6290
6291 3. The routines in this file.
6292
6293
6294 Long term we want to only have one body of simplification code; to
6295 get to that state I recommend the following steps:
6296
6297 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6298 which do not depend on pass-specific state into these routines.
6299
6300 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6301 use this routine whenever possible.
6302
6303 3. Allow for pass dependent state to be provided to these
6304 routines and add simplifications based on the pass dependent
6305 state. Remove code from cse.c & combine.c that becomes
6306 redundant/dead.
6307
6308 It will take time, but ultimately the compiler will be easier to
6309 maintain and improve. It's totally silly that when we add a
6310 simplification it needs to be added to 4 places (3 for RTL
6311 simplification and 1 for tree simplification). */
6312
6313 rtx
6314 simplify_rtx (const_rtx x)
6315 {
6316 const enum rtx_code code = GET_CODE (x);
6317 const machine_mode mode = GET_MODE (x);
6318
6319 switch (GET_RTX_CLASS (code))
6320 {
6321 case RTX_UNARY:
6322 return simplify_unary_operation (code, mode,
6323 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6324 case RTX_COMM_ARITH:
6325 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6326 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6327
6328 /* Fall through. */
6329
6330 case RTX_BIN_ARITH:
6331 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6332
6333 case RTX_TERNARY:
6334 case RTX_BITFIELD_OPS:
6335 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6336 XEXP (x, 0), XEXP (x, 1),
6337 XEXP (x, 2));
6338
6339 case RTX_COMPARE:
6340 case RTX_COMM_COMPARE:
6341 return simplify_relational_operation (code, mode,
6342 ((GET_MODE (XEXP (x, 0))
6343 != VOIDmode)
6344 ? GET_MODE (XEXP (x, 0))
6345 : GET_MODE (XEXP (x, 1))),
6346 XEXP (x, 0),
6347 XEXP (x, 1));
6348
6349 case RTX_EXTRA:
6350 if (code == SUBREG)
6351 return simplify_subreg (mode, SUBREG_REG (x),
6352 GET_MODE (SUBREG_REG (x)),
6353 SUBREG_BYTE (x));
6354 break;
6355
6356 case RTX_OBJ:
6357 if (code == LO_SUM)
6358 {
6359 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6360 if (GET_CODE (XEXP (x, 0)) == HIGH
6361 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6362 return XEXP (x, 1);
6363 }
6364 break;
6365
6366 default:
6367 break;
6368 }
6369 return NULL;
6370 }