Commit message: "Plug a memory leak."
Source file: gcc/simplify-rtx.c (from the gcc.git repository).
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "obstack.h"
41 #include "hashtab.h"
42 #include "cselib.h"
43
44 /* Simplification and canonicalization of RTL. */
45
/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.

   Note: within the PLUS arm below, the arg-pointer test must look at
   XEXP (X, 0); comparing X itself could never succeed there, since X
   is known to be a PLUS at that point.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
68
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.

   Note: within the first PLUS arm below, the arg-pointer test must look
   at XEXP (X, 0); comparing X itself could never succeed there, since X
   is known to be a PLUS at that point.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
94
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  The argument is fully parenthesized so that
   expression arguments (e.g. `a + b') are cast as a whole rather
   than having the cast bind to their first operand only.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
101
/* Forward declarations of the static functions defined below.  The
   cselib_*, *_elt_*, and value-table helpers implement the cselib
   value-numbering machinery; simplify_plus_minus and check_fold_consts
   support the constant-folding routines.  */

static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx, rtx));
static void check_fold_consts		PARAMS ((PTR));
static int entry_and_rtx_equal_p	PARAMS ((const void *, const void *));
static unsigned int get_value_hash	PARAMS ((const void *));
static struct elt_list *new_elt_list	PARAMS ((struct elt_list *,
						 cselib_val *));
static struct elt_loc_list *new_elt_loc_list PARAMS ((struct elt_loc_list *,
						      rtx));
static void unchain_one_value		PARAMS ((cselib_val *));
static void unchain_one_elt_list	PARAMS ((struct elt_list **));
static void unchain_one_elt_loc_list	PARAMS ((struct elt_loc_list **));
static void clear_table			PARAMS ((int));
static int discard_useless_locs		PARAMS ((void **, void *));
static int discard_useless_values	PARAMS ((void **, void *));
static void remove_useless_values	PARAMS ((void));
static rtx wrap_constant		PARAMS ((enum machine_mode, rtx));
static unsigned int hash_rtx		PARAMS ((rtx, enum machine_mode, int));
static cselib_val *new_cselib_val	PARAMS ((unsigned int,
						 enum machine_mode));
static void add_mem_for_addr		PARAMS ((cselib_val *, cselib_val *,
						 rtx));
static cselib_val *cselib_lookup_mem	PARAMS ((rtx, int));
static rtx cselib_subst_to_values	PARAMS ((rtx));
static void cselib_invalidate_regno	PARAMS ((unsigned int,
						 enum machine_mode));
static int cselib_mem_conflict_p	PARAMS ((rtx, rtx));
static int cselib_invalidate_mem_1	PARAMS ((void **, void *));
static void cselib_invalidate_mem	PARAMS ((rtx));
static void cselib_invalidate_rtx	PARAMS ((rtx, rtx, void *));
static void cselib_record_set		PARAMS ((rtx, cselib_val *,
						 cselib_val *));
static void cselib_record_sets		PARAMS ((rtx));
/* There are three ways in which cselib can look up an rtx:
   - for a REG, the reg_values table (which is indexed by regno) is used
   - for a MEM, we recursively look up its address and then follow the
     addr_list of that value
   - for everything else, we compute a hash value and go through the hash
     table.  Since different rtx's can still have the same hash value,
     this involves walking the table entries for a given value and comparing
     the locations of the entries with the rtx we are looking up.  */

/* A table that enables us to look up elts by their value.  */
static htab_t hash_table;

/* This is a global so we don't have to pass this through every function.
   It is used in new_elt_loc_list to set SETTING_INSN.  */
static rtx cselib_current_insn;

/* Every new unknown value gets a unique number.  */
static unsigned int next_unknown_value;

/* The number of registers we had when the varrays were last resized.  */
static unsigned int cselib_nregs;

/* Count values without known locations.  Whenever this grows too big, we
   remove these useless values from the table.  */
static int n_useless_values;

/* Number of useless values before we remove them from the hash table.  */
#define MAX_USELESS_VALUES 32

/* This table maps from register number to values.  It does not contain
   pointers to cselib_val structures, but rather elt_lists.  The purpose is
   to be able to refer to the same register in different modes.  */
static varray_type reg_values;
#define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))

/* Here the set of indices I with REG_VALUES(I) != 0 is saved.  This is used
   in clear_table() for fast emptying.  */
static varray_type used_regs;

/* We pass this to cselib_invalidate_mem to invalidate all of
   memory for a non-const call instruction.  */
static rtx callmem;

/* Memory for our structures is allocated from this obstack.  */
static struct obstack cselib_obstack;

/* Marks the start of the obstack; used to quickly free all memory.  */
static char *cselib_startobj;

/* Free lists of discarded structures, kept for reuse instead of being
   returned to the obstack.  */
static cselib_val *empty_vals;
static struct elt_list *empty_elt_lists;
static struct elt_loc_list *empty_elt_loc_lists;

/* Set by discard_useless_locs if it deleted the last location of any
   value.  */
static int values_became_useless;
193 \f
194 /* Make a binary operation by properly ordering the operands and
195 seeing if the expression folds. */
196
197 rtx
198 simplify_gen_binary (code, mode, op0, op1)
199 enum rtx_code code;
200 enum machine_mode mode;
201 rtx op0, op1;
202 {
203 rtx tem;
204
205 /* Put complex operands first and constants second if commutative. */
206 if (GET_RTX_CLASS (code) == 'c'
207 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
208 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
209 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
210 || (GET_CODE (op0) == SUBREG
211 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
212 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
213 tem = op0, op0 = op1, op1 = tem;
214
215 /* If this simplifies, do it. */
216 tem = simplify_binary_operation (code, mode, op0, op1);
217
218 if (tem)
219 return tem;
220
221 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
222 just form the operation. */
223
224 if (code == PLUS && GET_CODE (op1) == CONST_INT
225 && GET_MODE (op0) != VOIDmode)
226 return plus_constant (op0, INTVAL (op1));
227 else if (code == MINUS && GET_CODE (op1) == CONST_INT
228 && GET_MODE (op0) != VOIDmode)
229 return plus_constant (op0, - INTVAL (op1));
230 else
231 return gen_rtx_fmt_ee (code, mode, op0, op1);
232 }
233 \f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.

   OP_MODE may be VOIDmode when the caller does not know the operand's
   original mode; several cases below then substitute MODE for it.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  /* Fold FLOAT of an integer constant (a CONST_INT or an integral
     CONST_DOUBLE, both of which have VOIDmode) to a CONST_DOUBLE.  */
  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      /* Extract the (low, high) halves of the integer value.  */
      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      /* No emulated real arithmetic: build the value in a host double,
	 treating the low half as unsigned and handling negative values
	 via complement arithmetic.  */
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  /* Likewise fold UNSIGNED_FLOAT of an integer constant.  */
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	/* Narrow operand: only the low half, masked to OP_MODE, counts.  */
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else

      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  /* Fold a unary operation applied to a CONST_INT when the result fits
     in a single HOST_WIDE_INT.  */
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask down to OP_MODE, then propagate the sign bit.  */
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      /* Canonicalize the result for MODE before wrapping it in an rtx.  */
      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  /* NOTE(review): the width test uses HOST_BITS_PER_INT rather than
     HOST_BITS_PER_WIDE_INT -- confirm this is intentional.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      /* Extract the (low, high) halves of the operand.  */
      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  /* If the low half is zero, the first set bit is in the
	     high half.  */
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      /* Mask down to OP_MODE, propagate the sign bit within the
		 low half, then sign-extend into the high half.  */
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  /* Fold a unary operation on a floating point CONST_DOUBLE, guarding
     against floating point traps with a setjmp-based handler.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	case SQRT:
	  return 0;

	default:
	  abort ();
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL_PTR);
      return x;
    }

  /* Fold FIX/UNSIGNED_FIX of a float CONST_DOUBLE to an integer
     constant, again under the protection of a float handler.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  abort ();
	}

      set_float_handler (NULL_PTR);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && can_reverse_comparison_p (op, NULL_RTX))
	    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (op)),
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  /* Extending a constant address from ptr_mode to Pmode is just
	     an address conversion when pointers extend signed.  */
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  /* Likewise when pointers extend unsigned.  */
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
645 \f
646 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
647 and OP1. Return 0 if no simplification is possible.
648
649 Don't use this for relational operations such as EQ or LT.
650 Use simplify_relational_operation instead. */
651
652 rtx
653 simplify_binary_operation (code, mode, op0, op1)
654 enum rtx_code code;
655 enum machine_mode mode;
656 rtx op0, op1;
657 {
658 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
659 HOST_WIDE_INT val;
660 unsigned int width = GET_MODE_BITSIZE (mode);
661 rtx tem;
662
663 /* Relational operations don't work here. We must know the mode
664 of the operands in order to do the comparison correctly.
665 Assuming a full word can give incorrect results.
666 Consider comparing 128 with -128 in QImode. */
667
668 if (GET_RTX_CLASS (code) == '<')
669 abort ();
670
671 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
672 if (GET_MODE_CLASS (mode) == MODE_FLOAT
673 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
674 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
675 {
676 REAL_VALUE_TYPE f0, f1, value;
677 jmp_buf handler;
678
679 if (setjmp (handler))
680 return 0;
681
682 set_float_handler (handler);
683
684 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
685 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
686 f0 = real_value_truncate (mode, f0);
687 f1 = real_value_truncate (mode, f1);
688
689 #ifdef REAL_ARITHMETIC
690 #ifndef REAL_INFINITY
691 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
692 return 0;
693 #endif
694 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
695 #else
696 switch (code)
697 {
698 case PLUS:
699 value = f0 + f1;
700 break;
701 case MINUS:
702 value = f0 - f1;
703 break;
704 case MULT:
705 value = f0 * f1;
706 break;
707 case DIV:
708 #ifndef REAL_INFINITY
709 if (f1 == 0)
710 return 0;
711 #endif
712 value = f0 / f1;
713 break;
714 case SMIN:
715 value = MIN (f0, f1);
716 break;
717 case SMAX:
718 value = MAX (f0, f1);
719 break;
720 default:
721 abort ();
722 }
723 #endif
724
725 value = real_value_truncate (mode, value);
726 set_float_handler (NULL_PTR);
727 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
728 }
729 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
730
731 /* We can fold some multi-word operations. */
732 if (GET_MODE_CLASS (mode) == MODE_INT
733 && width == HOST_BITS_PER_WIDE_INT * 2
734 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
735 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
736 {
737 unsigned HOST_WIDE_INT l1, l2, lv;
738 HOST_WIDE_INT h1, h2, hv;
739
740 if (GET_CODE (op0) == CONST_DOUBLE)
741 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
742 else
743 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
744
745 if (GET_CODE (op1) == CONST_DOUBLE)
746 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
747 else
748 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
749
750 switch (code)
751 {
752 case MINUS:
753 /* A - B == A + (-B). */
754 neg_double (l2, h2, &lv, &hv);
755 l2 = lv, h2 = hv;
756
757 /* .. fall through ... */
758
759 case PLUS:
760 add_double (l1, h1, l2, h2, &lv, &hv);
761 break;
762
763 case MULT:
764 mul_double (l1, h1, l2, h2, &lv, &hv);
765 break;
766
767 case DIV: case MOD: case UDIV: case UMOD:
768 /* We'd need to include tree.h to do this and it doesn't seem worth
769 it. */
770 return 0;
771
772 case AND:
773 lv = l1 & l2, hv = h1 & h2;
774 break;
775
776 case IOR:
777 lv = l1 | l2, hv = h1 | h2;
778 break;
779
780 case XOR:
781 lv = l1 ^ l2, hv = h1 ^ h2;
782 break;
783
784 case SMIN:
785 if (h1 < h2
786 || (h1 == h2
787 && ((unsigned HOST_WIDE_INT) l1
788 < (unsigned HOST_WIDE_INT) l2)))
789 lv = l1, hv = h1;
790 else
791 lv = l2, hv = h2;
792 break;
793
794 case SMAX:
795 if (h1 > h2
796 || (h1 == h2
797 && ((unsigned HOST_WIDE_INT) l1
798 > (unsigned HOST_WIDE_INT) l2)))
799 lv = l1, hv = h1;
800 else
801 lv = l2, hv = h2;
802 break;
803
804 case UMIN:
805 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
806 || (h1 == h2
807 && ((unsigned HOST_WIDE_INT) l1
808 < (unsigned HOST_WIDE_INT) l2)))
809 lv = l1, hv = h1;
810 else
811 lv = l2, hv = h2;
812 break;
813
814 case UMAX:
815 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
816 || (h1 == h2
817 && ((unsigned HOST_WIDE_INT) l1
818 > (unsigned HOST_WIDE_INT) l2)))
819 lv = l1, hv = h1;
820 else
821 lv = l2, hv = h2;
822 break;
823
824 case LSHIFTRT: case ASHIFTRT:
825 case ASHIFT:
826 case ROTATE: case ROTATERT:
827 #ifdef SHIFT_COUNT_TRUNCATED
828 if (SHIFT_COUNT_TRUNCATED)
829 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
830 #endif
831
832 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
833 return 0;
834
835 if (code == LSHIFTRT || code == ASHIFTRT)
836 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
837 code == ASHIFTRT);
838 else if (code == ASHIFT)
839 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
840 else if (code == ROTATE)
841 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
842 else /* code == ROTATERT */
843 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
844 break;
845
846 default:
847 return 0;
848 }
849
850 return immed_double_const (lv, hv, mode);
851 }
852
853 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
854 || width > HOST_BITS_PER_WIDE_INT || width == 0)
855 {
856 /* Even if we can't compute a constant result,
857 there are some cases worth simplifying. */
858
859 switch (code)
860 {
861 case PLUS:
862 /* In IEEE floating point, x+0 is not the same as x. Similarly
863 for the other optimizations below. */
864 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
865 && FLOAT_MODE_P (mode) && ! flag_fast_math)
866 break;
867
868 if (op1 == CONST0_RTX (mode))
869 return op0;
870
871 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
872 if (GET_CODE (op0) == NEG)
873 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
874 else if (GET_CODE (op1) == NEG)
875 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
876
877 /* Handle both-operands-constant cases. We can only add
878 CONST_INTs to constants since the sum of relocatable symbols
879 can't be handled by most assemblers. Don't add CONST_INT
880 to CONST_INT since overflow won't be computed properly if wider
881 than HOST_BITS_PER_WIDE_INT. */
882
883 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
884 && GET_CODE (op1) == CONST_INT)
885 return plus_constant (op0, INTVAL (op1));
886 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
887 && GET_CODE (op0) == CONST_INT)
888 return plus_constant (op1, INTVAL (op0));
889
890 /* See if this is something like X * C - X or vice versa or
891 if the multiplication is written as a shift. If so, we can
892 distribute and make a new multiply, shift, or maybe just
893 have X (if C is 2 in the example above). But don't make
894 real multiply if we didn't have one before. */
895
896 if (! FLOAT_MODE_P (mode))
897 {
898 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
899 rtx lhs = op0, rhs = op1;
900 int had_mult = 0;
901
902 if (GET_CODE (lhs) == NEG)
903 coeff0 = -1, lhs = XEXP (lhs, 0);
904 else if (GET_CODE (lhs) == MULT
905 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
906 {
907 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
908 had_mult = 1;
909 }
910 else if (GET_CODE (lhs) == ASHIFT
911 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
912 && INTVAL (XEXP (lhs, 1)) >= 0
913 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
914 {
915 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
916 lhs = XEXP (lhs, 0);
917 }
918
919 if (GET_CODE (rhs) == NEG)
920 coeff1 = -1, rhs = XEXP (rhs, 0);
921 else if (GET_CODE (rhs) == MULT
922 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
923 {
924 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
925 had_mult = 1;
926 }
927 else if (GET_CODE (rhs) == ASHIFT
928 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
929 && INTVAL (XEXP (rhs, 1)) >= 0
930 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
931 {
932 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
933 rhs = XEXP (rhs, 0);
934 }
935
936 if (rtx_equal_p (lhs, rhs))
937 {
938 tem = simplify_gen_binary (MULT, mode, lhs,
939 GEN_INT (coeff0 + coeff1));
940 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
941 }
942 }
943
944 /* If one of the operands is a PLUS or a MINUS, see if we can
945 simplify this by the associative law.
946 Don't use the associative law for floating point.
947 The inaccuracy makes it nonassociative,
948 and subtle programs can break if operations are associated. */
949
950 if (INTEGRAL_MODE_P (mode)
951 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
952 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
953 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
954 return tem;
955 break;
956
957 case COMPARE:
958 #ifdef HAVE_cc0
959 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
960 using cc0, in which case we want to leave it as a COMPARE
961 so we can distinguish it from a register-register-copy.
962
963 In IEEE floating point, x-0 is not the same as x. */
964
965 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
966 || ! FLOAT_MODE_P (mode) || flag_fast_math)
967 && op1 == CONST0_RTX (mode))
968 return op0;
969 #endif
970
971 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
972 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
973 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
974 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
975 {
976 rtx xop00 = XEXP (op0, 0);
977 rtx xop10 = XEXP (op1, 0);
978
979 #ifdef HAVE_cc0
980 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
981 #else
982 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
983 && GET_MODE (xop00) == GET_MODE (xop10)
984 && REGNO (xop00) == REGNO (xop10)
985 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
986 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
987 #endif
988 return xop00;
989 }
990
991 break;
992 case MINUS:
993 /* None of these optimizations can be done for IEEE
994 floating point. */
995 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
996 && FLOAT_MODE_P (mode) && ! flag_fast_math)
997 break;
998
999 /* We can't assume x-x is 0 even with non-IEEE floating point,
1000 but since it is zero except in very strange circumstances, we
1001 will treat it as zero with -ffast-math. */
1002 if (rtx_equal_p (op0, op1)
1003 && ! side_effects_p (op0)
1004 && (! FLOAT_MODE_P (mode) || flag_fast_math))
1005 return CONST0_RTX (mode);
1006
1007 /* Change subtraction from zero into negation. */
1008 if (op0 == CONST0_RTX (mode))
1009 return gen_rtx_NEG (mode, op1);
1010
1011 /* (-1 - a) is ~a. */
1012 if (op0 == constm1_rtx)
1013 return gen_rtx_NOT (mode, op1);
1014
1015 /* Subtracting 0 has no effect. */
1016 if (op1 == CONST0_RTX (mode))
1017 return op0;
1018
1019 /* See if this is something like X * C - X or vice versa or
1020 if the multiplication is written as a shift. If so, we can
1021 distribute and make a new multiply, shift, or maybe just
1022 have X (if C is 2 in the example above). But don't make
1023 real multiply if we didn't have one before. */
1024
1025 if (! FLOAT_MODE_P (mode))
1026 {
1027 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1028 rtx lhs = op0, rhs = op1;
1029 int had_mult = 0;
1030
1031 if (GET_CODE (lhs) == NEG)
1032 coeff0 = -1, lhs = XEXP (lhs, 0);
1033 else if (GET_CODE (lhs) == MULT
1034 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1035 {
1036 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1037 had_mult = 1;
1038 }
1039 else if (GET_CODE (lhs) == ASHIFT
1040 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1041 && INTVAL (XEXP (lhs, 1)) >= 0
1042 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1043 {
1044 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1045 lhs = XEXP (lhs, 0);
1046 }
1047
1048 if (GET_CODE (rhs) == NEG)
1049 coeff1 = - 1, rhs = XEXP (rhs, 0);
1050 else if (GET_CODE (rhs) == MULT
1051 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1052 {
1053 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1054 had_mult = 1;
1055 }
1056 else if (GET_CODE (rhs) == ASHIFT
1057 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1058 && INTVAL (XEXP (rhs, 1)) >= 0
1059 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1060 {
1061 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1062 rhs = XEXP (rhs, 0);
1063 }
1064
1065 if (rtx_equal_p (lhs, rhs))
1066 {
1067 tem = simplify_gen_binary (MULT, mode, lhs,
1068 GEN_INT (coeff0 - coeff1));
1069 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1070 }
1071 }
1072
1073 /* (a - (-b)) -> (a + b). */
1074 if (GET_CODE (op1) == NEG)
1075 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1076
1077 /* If one of the operands is a PLUS or a MINUS, see if we can
1078 simplify this by the associative law.
1079 Don't use the associative law for floating point.
1080 The inaccuracy makes it nonassociative,
1081 and subtle programs can break if operations are associated. */
1082
1083 if (INTEGRAL_MODE_P (mode)
1084 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1085 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1086 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1087 return tem;
1088
1089 /* Don't let a relocatable value get a negative coeff. */
1090 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1091 return plus_constant (op0, - INTVAL (op1));
1092
1093 /* (x - (x & y)) -> (x & ~y) */
1094 if (GET_CODE (op1) == AND)
1095 {
1096 if (rtx_equal_p (op0, XEXP (op1, 0)))
1097 return simplify_gen_binary (AND, mode, op0,
1098 gen_rtx_NOT (mode, XEXP (op1, 1)));
1099 if (rtx_equal_p (op0, XEXP (op1, 1)))
1100 return simplify_gen_binary (AND, mode, op0,
1101 gen_rtx_NOT (mode, XEXP (op1, 0)));
1102 }
1103 break;
1104
1105 case MULT:
1106 if (op1 == constm1_rtx)
1107 {
1108 tem = simplify_unary_operation (NEG, mode, op0, mode);
1109
1110 return tem ? tem : gen_rtx_NEG (mode, op0);
1111 }
1112
1113 /* In IEEE floating point, x*0 is not always 0. */
1114 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1115 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1116 && op1 == CONST0_RTX (mode)
1117 && ! side_effects_p (op0))
1118 return op1;
1119
1120 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1121 However, ANSI says we can drop signals,
1122 so we can do this anyway. */
1123 if (op1 == CONST1_RTX (mode))
1124 return op0;
1125
1126 /* Convert multiply by constant power of two into shift unless
1127 we are still generating RTL. This test is a kludge. */
1128 if (GET_CODE (op1) == CONST_INT
1129 && (val = exact_log2 (INTVAL (op1))) >= 0
1130 /* If the mode is larger than the host word size, and the
1131 uppermost bit is set, then this isn't a power of two due
1132 to implicit sign extension. */
1133 && (width <= HOST_BITS_PER_WIDE_INT
1134 || val != HOST_BITS_PER_WIDE_INT - 1)
1135 && ! rtx_equal_function_value_matters)
1136 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1137
1138 if (GET_CODE (op1) == CONST_DOUBLE
1139 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1140 {
1141 REAL_VALUE_TYPE d;
1142 jmp_buf handler;
1143 int op1is2, op1ism1;
1144
1145 if (setjmp (handler))
1146 return 0;
1147
1148 set_float_handler (handler);
1149 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1150 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1151 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1152 set_float_handler (NULL_PTR);
1153
1154 /* x*2 is x+x and x*(-1) is -x */
1155 if (op1is2 && GET_MODE (op0) == mode)
1156 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1157
1158 else if (op1ism1 && GET_MODE (op0) == mode)
1159 return gen_rtx_NEG (mode, op0);
1160 }
1161 break;
1162
1163 case IOR:
1164 if (op1 == const0_rtx)
1165 return op0;
1166 if (GET_CODE (op1) == CONST_INT
1167 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1168 return op1;
1169 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1170 return op0;
1171 /* A | (~A) -> -1 */
1172 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1173 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1174 && ! side_effects_p (op0)
1175 && GET_MODE_CLASS (mode) != MODE_CC)
1176 return constm1_rtx;
1177 break;
1178
1179 case XOR:
1180 if (op1 == const0_rtx)
1181 return op0;
1182 if (GET_CODE (op1) == CONST_INT
1183 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1184 return gen_rtx_NOT (mode, op0);
1185 if (op0 == op1 && ! side_effects_p (op0)
1186 && GET_MODE_CLASS (mode) != MODE_CC)
1187 return const0_rtx;
1188 break;
1189
1190 case AND:
1191 if (op1 == const0_rtx && ! side_effects_p (op0))
1192 return const0_rtx;
1193 if (GET_CODE (op1) == CONST_INT
1194 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1195 return op0;
1196 if (op0 == op1 && ! side_effects_p (op0)
1197 && GET_MODE_CLASS (mode) != MODE_CC)
1198 return op0;
1199 /* A & (~A) -> 0 */
1200 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1201 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1202 && ! side_effects_p (op0)
1203 && GET_MODE_CLASS (mode) != MODE_CC)
1204 return const0_rtx;
1205 break;
1206
1207 case UDIV:
1208 /* Convert divide by power of two into shift (divide by 1 handled
1209 below). */
1210 if (GET_CODE (op1) == CONST_INT
1211 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1212 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1213
1214 /* ... fall through ... */
1215
1216 case DIV:
1217 if (op1 == CONST1_RTX (mode))
1218 return op0;
1219
1220 /* In IEEE floating point, 0/x is not always 0. */
1221 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1222 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1223 && op0 == CONST0_RTX (mode)
1224 && ! side_effects_p (op1))
1225 return op0;
1226
1227 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1228 /* Change division by a constant into multiplication. Only do
1229 this with -ffast-math until an expert says it is safe in
1230 general. */
1231 else if (GET_CODE (op1) == CONST_DOUBLE
1232 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1233 && op1 != CONST0_RTX (mode)
1234 && flag_fast_math)
1235 {
1236 REAL_VALUE_TYPE d;
1237 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1238
1239 if (! REAL_VALUES_EQUAL (d, dconst0))
1240 {
1241 #if defined (REAL_ARITHMETIC)
1242 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1243 return gen_rtx_MULT (mode, op0,
1244 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1245 #else
1246 return
1247 gen_rtx_MULT (mode, op0,
1248 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1249 #endif
1250 }
1251 }
1252 #endif
1253 break;
1254
1255 case UMOD:
1256 /* Handle modulus by power of two (mod with 1 handled below). */
1257 if (GET_CODE (op1) == CONST_INT
1258 && exact_log2 (INTVAL (op1)) > 0)
1259 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1260
1261 /* ... fall through ... */
1262
1263 case MOD:
1264 if ((op0 == const0_rtx || op1 == const1_rtx)
1265 && ! side_effects_p (op0) && ! side_effects_p (op1))
1266 return const0_rtx;
1267 break;
1268
1269 case ROTATERT:
1270 case ROTATE:
1271 /* Rotating ~0 always results in ~0. */
1272 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1273 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1274 && ! side_effects_p (op1))
1275 return op0;
1276
1277 /* ... fall through ... */
1278
1279 case ASHIFT:
1280 case ASHIFTRT:
1281 case LSHIFTRT:
1282 if (op1 == const0_rtx)
1283 return op0;
1284 if (op0 == const0_rtx && ! side_effects_p (op1))
1285 return op0;
1286 break;
1287
1288 case SMIN:
1289 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1290 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1291 && ! side_effects_p (op0))
1292 return op1;
1293 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1294 return op0;
1295 break;
1296
1297 case SMAX:
1298 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1299 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1300 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1301 && ! side_effects_p (op0))
1302 return op1;
1303 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1304 return op0;
1305 break;
1306
1307 case UMIN:
1308 if (op1 == const0_rtx && ! side_effects_p (op0))
1309 return op1;
1310 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1311 return op0;
1312 break;
1313
1314 case UMAX:
1315 if (op1 == constm1_rtx && ! side_effects_p (op0))
1316 return op1;
1317 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1318 return op0;
1319 break;
1320
1321 default:
1322 abort ();
1323 }
1324
1325 return 0;
1326 }
1327
1328 /* Get the integer argument values in two forms:
1329 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1330
1331 arg0 = INTVAL (op0);
1332 arg1 = INTVAL (op1);
1333
1334 if (width < HOST_BITS_PER_WIDE_INT)
1335 {
1336 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1337 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1338
1339 arg0s = arg0;
1340 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1341 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1342
1343 arg1s = arg1;
1344 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1345 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1346 }
1347 else
1348 {
1349 arg0s = arg0;
1350 arg1s = arg1;
1351 }
1352
1353 /* Compute the value of the arithmetic. */
1354
1355 switch (code)
1356 {
1357 case PLUS:
1358 val = arg0s + arg1s;
1359 break;
1360
1361 case MINUS:
1362 val = arg0s - arg1s;
1363 break;
1364
1365 case MULT:
1366 val = arg0s * arg1s;
1367 break;
1368
1369 case DIV:
1370 if (arg1s == 0)
1371 return 0;
1372 val = arg0s / arg1s;
1373 break;
1374
1375 case MOD:
1376 if (arg1s == 0)
1377 return 0;
1378 val = arg0s % arg1s;
1379 break;
1380
1381 case UDIV:
1382 if (arg1 == 0)
1383 return 0;
1384 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1385 break;
1386
1387 case UMOD:
1388 if (arg1 == 0)
1389 return 0;
1390 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1391 break;
1392
1393 case AND:
1394 val = arg0 & arg1;
1395 break;
1396
1397 case IOR:
1398 val = arg0 | arg1;
1399 break;
1400
1401 case XOR:
1402 val = arg0 ^ arg1;
1403 break;
1404
1405 case LSHIFTRT:
1406 /* If shift count is undefined, don't fold it; let the machine do
1407 what it wants. But truncate it if the machine will do that. */
1408 if (arg1 < 0)
1409 return 0;
1410
1411 #ifdef SHIFT_COUNT_TRUNCATED
1412 if (SHIFT_COUNT_TRUNCATED)
1413 arg1 %= width;
1414 #endif
1415
1416 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1417 break;
1418
1419 case ASHIFT:
1420 if (arg1 < 0)
1421 return 0;
1422
1423 #ifdef SHIFT_COUNT_TRUNCATED
1424 if (SHIFT_COUNT_TRUNCATED)
1425 arg1 %= width;
1426 #endif
1427
1428 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1429 break;
1430
1431 case ASHIFTRT:
1432 if (arg1 < 0)
1433 return 0;
1434
1435 #ifdef SHIFT_COUNT_TRUNCATED
1436 if (SHIFT_COUNT_TRUNCATED)
1437 arg1 %= width;
1438 #endif
1439
1440 val = arg0s >> arg1;
1441
1442 /* Bootstrap compiler may not have sign extended the right shift.
1443 Manually extend the sign to insure bootstrap cc matches gcc. */
1444 if (arg0s < 0 && arg1 > 0)
1445 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1446
1447 break;
1448
1449 case ROTATERT:
1450 if (arg1 < 0)
1451 return 0;
1452
1453 arg1 %= width;
1454 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1455 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1456 break;
1457
1458 case ROTATE:
1459 if (arg1 < 0)
1460 return 0;
1461
1462 arg1 %= width;
1463 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1464 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1465 break;
1466
1467 case COMPARE:
1468 /* Do nothing here. */
1469 return 0;
1470
1471 case SMIN:
1472 val = arg0s <= arg1s ? arg0s : arg1s;
1473 break;
1474
1475 case UMIN:
1476 val = ((unsigned HOST_WIDE_INT) arg0
1477 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1478 break;
1479
1480 case SMAX:
1481 val = arg0s > arg1s ? arg0s : arg1s;
1482 break;
1483
1484 case UMAX:
1485 val = ((unsigned HOST_WIDE_INT) arg0
1486 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1487 break;
1488
1489 default:
1490 abort ();
1491 }
1492
1493 val = trunc_int_for_mode (val, mode);
1494
1495 return GEN_INT (val);
1496 }
1497 \f
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   Returns the simplified rtx, or zero when no worthwhile simplification
   was found (callers must be prepared for a zero return).  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  /* Flattened list of up to 8 addends; NEGS[i] is nonzero when OPS[i]
     enters the sum with a minus sign.  */
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  /* N_OPS counts live entries in OPS; INPUT_OPS / INPUT_CONSTS record how
     complex the input was so we can tell whether we actually improved it.  */
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    /* Split a nested sum into two separate addends; the second
	       one inherits (or flips, for MINUS) this entry's sign.  */
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    /* -x: keep x and flip the sign flag instead.  */
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    /* Look through the CONST wrapper.  */
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    /* Fold a pending sign flip into the constant itself.  */
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      /* Combine OPS[i] and OPS[j]; the sign flags determine whether
		 the pair combines as an addition or a subtraction.  */
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  /* Store the combined value in slot I and retire slot J.
		     Re-normalize NEG and negative CONST_INT results.  */
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
		  changed = 1;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
1674
/* Argument block passed through do_float_handler to check_fold_consts,
   so a trapping floating-point comparison can be caught without losing
   the partial results.  */

struct cfc_args
{
  rtx op0, op1;			/* Input: the two CONST_DOUBLEs to compare.  */
  int equal, op0lt, op1lt;	/* Output: comparison results.  */
};
1680
1681 static void
1682 check_fold_consts (data)
1683 PTR data;
1684 {
1685 struct cfc_args *args = (struct cfc_args *) data;
1686 REAL_VALUE_TYPE d0, d1;
1687
1688 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1689 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1690 args->equal = REAL_VALUES_EQUAL (d0, d1);
1691 args->op0lt = REAL_VALUES_LESS (d0, d1);
1692 args->op1lt = REAL_VALUES_LESS (d1, d0);
1693 }
1694
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  /* Flags describing how OP0 compares to OP1, filled in below:
     signed (op0lt/op1lt) and unsigned (op0ltu/op1ltu) orderings.  */
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  /* VOIDmode ("infinite precision") is only legitimate when both
     operands are themselves mode-less constants.  */
  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up input for check_fold_consts() */
      args.op0 = op0;
      args.op1 = op1;

      if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
	/* We got an exception from check_fold_consts() */
	return 0;

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      /* Each constant is handled as a two-word value: low word L*, high
	 word H*, in signed (suffix s) and unsigned (suffix u) flavors.  */
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Compare high words first, low words to break ties.  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    default:
      abort ();
    }
}
1931 \f
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* Fold the extraction when value, size (OP1) and position (OP2)
	 are all constants and the field fits within the mode.  */
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* Shift the selected field down to bit 0; OP2 counts from the
	     opposite end when bits are numbered big-endian.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one of the arms outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  /* Try to fold the comparison that forms the condition.  */
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp
	    = simplify_relational_operation (GET_CODE (op0), cmp_mode,
					     XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* cond ? STORE_FLAG_VALUE : 0 is just the comparison
		 itself; the swapped arms use the reversed comparison.  */
	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE
		       && can_reverse_comparison_p (op0, NULL_RTX))
		code = reverse_condition (GET_CODE (op0));
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
2047
2048 /* Simplify X, an rtx expression.
2049
2050 Return the simplified expression or NULL if no simplifications
2051 were possible.
2052
2053 This is the preferred entry point into the simplification routines;
2054 however, we still allow passes to call the more specific routines.
2055
   Right now GCC has three (yes, three) major bodies of RTL simplification
2057 code that need to be unified.
2058
2059 1. fold_rtx in cse.c. This code uses various CSE specific
2060 information to aid in RTL simplification.
2061
2062 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2063 it uses combine specific information to aid in RTL
2064 simplification.
2065
2066 3. The routines in this file.
2067
2068
2069 Long term we want to only have one body of simplification code; to
2070 get to that state I recommend the following steps:
2071
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
2073 which are not pass dependent state into these routines.
2074
2075 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2076 use this routine whenever possible.
2077
2078 3. Allow for pass dependent state to be provided to these
2079 routines and add simplifications based on the pass dependent
2080 state. Remove code from cse.c & combine.c that becomes
2081 redundant/dead.
2082
2083 It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
2087
2088 rtx
2089 simplify_rtx (x)
2090 rtx x;
2091 {
2092 enum rtx_code code;
2093 enum machine_mode mode;
2094
2095 mode = GET_MODE (x);
2096 code = GET_CODE (x);
2097
2098 switch (GET_RTX_CLASS (code))
2099 {
2100 case '1':
2101 return simplify_unary_operation (code, mode,
2102 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2103 case '2':
2104 case 'c':
2105 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2106
2107 case '3':
2108 case 'b':
2109 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2110 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
2111
2112 case '<':
2113 return simplify_relational_operation (code,
2114 (GET_MODE (XEXP (x, 0)) != VOIDmode
2115 ? GET_MODE (XEXP (x, 0))
2116 : GET_MODE (XEXP (x, 1))),
2117 XEXP (x, 0), XEXP (x, 1));
2118 default:
2119 return NULL;
2120 }
2121 }
2122 \f
2123
2124 /* Allocate a struct elt_list and fill in its two elements with the
2125 arguments. */
2126
2127 static struct elt_list *
2128 new_elt_list (next, elt)
2129 struct elt_list *next;
2130 cselib_val *elt;
2131 {
2132 struct elt_list *el = empty_elt_lists;
2133
2134 if (el)
2135 empty_elt_lists = el->next;
2136 else
2137 el = (struct elt_list *) obstack_alloc (&cselib_obstack,
2138 sizeof (struct elt_list));
2139 el->next = next;
2140 el->elt = elt;
2141 return el;
2142 }
2143
2144 /* Allocate a struct elt_loc_list and fill in its two elements with the
2145 arguments. */
2146
2147 static struct elt_loc_list *
2148 new_elt_loc_list (next, loc)
2149 struct elt_loc_list *next;
2150 rtx loc;
2151 {
2152 struct elt_loc_list *el = empty_elt_loc_lists;
2153
2154 if (el)
2155 empty_elt_loc_lists = el->next;
2156 else
2157 el = (struct elt_loc_list *) obstack_alloc (&cselib_obstack,
2158 sizeof (struct elt_loc_list));
2159 el->next = next;
2160 el->loc = loc;
2161 el->setting_insn = cselib_current_insn;
2162 return el;
2163 }
2164
2165 /* The elt_list at *PL is no longer needed. Unchain it and free its
2166 storage. */
2167
2168 static void
2169 unchain_one_elt_list (pl)
2170 struct elt_list **pl;
2171 {
2172 struct elt_list *l = *pl;
2173
2174 *pl = l->next;
2175 l->next = empty_elt_lists;
2176 empty_elt_lists = l;
2177 }
2178
2179 /* Likewise for elt_loc_lists. */
2180
2181 static void
2182 unchain_one_elt_loc_list (pl)
2183 struct elt_loc_list **pl;
2184 {
2185 struct elt_loc_list *l = *pl;
2186
2187 *pl = l->next;
2188 l->next = empty_elt_loc_lists;
2189 empty_elt_loc_lists = l;
2190 }
2191
2192 /* Likewise for cselib_vals. This also frees the addr_list associated with
2193 V. */
2194
2195 static void
2196 unchain_one_value (v)
2197 cselib_val *v;
2198 {
2199 while (v->addr_list)
2200 unchain_one_elt_list (&v->addr_list);
2201
2202 v->u.next_free = empty_vals;
2203 empty_vals = v;
2204 }
2205
2206 /* Remove all entries from the hash table. Also used during
2207 initialization. If CLEAR_ALL isn't set, then only clear the entries
2208 which are known to have been used. */
2209
2210 static void
2211 clear_table (clear_all)
2212 int clear_all;
2213 {
2214 unsigned int i;
2215
2216 if (clear_all)
2217 for (i = 0; i < cselib_nregs; i++)
2218 REG_VALUES (i) = 0;
2219 else
2220 for (i = 0; i < VARRAY_ACTIVE_SIZE (used_regs); i++)
2221 REG_VALUES (VARRAY_UINT (used_regs, i)) = 0;
2222
2223 VARRAY_POP_ALL (used_regs);
2224
2225 htab_empty (hash_table);
2226 obstack_free (&cselib_obstack, cselib_startobj);
2227
2228 empty_vals = 0;
2229 empty_elt_lists = 0;
2230 empty_elt_loc_lists = 0;
2231 n_useless_values = 0;
2232
2233 next_unknown_value = 0;
2234 }
2235
2236 /* The equality test for our hash table. The first argument ENTRY is a table
2237 element (i.e. a cselib_val), while the second arg X is an rtx. We know
2238 that all callers of htab_find_slot_with_hash will wrap CONST_INTs into a
2239 CONST of an appropriate mode. */
2240
2241 static int
2242 entry_and_rtx_equal_p (entry, x_arg)
2243 const void *entry, *x_arg;
2244 {
2245 struct elt_loc_list *l;
2246 const cselib_val *v = (const cselib_val *) entry;
2247 rtx x = (rtx) x_arg;
2248 enum machine_mode mode = GET_MODE (x);
2249
2250 if (GET_CODE (x) == CONST_INT
2251 || (mode == VOIDmode && GET_CODE (x) == CONST_DOUBLE))
2252 abort ();
2253 if (mode != GET_MODE (v->u.val_rtx))
2254 return 0;
2255
2256 /* Unwrap X if necessary. */
2257 if (GET_CODE (x) == CONST
2258 && (GET_CODE (XEXP (x, 0)) == CONST_INT
2259 || GET_CODE (XEXP (x, 0)) == CONST_DOUBLE))
2260 x = XEXP (x, 0);
2261
2262 /* We don't guarantee that distinct rtx's have different hash values,
2263 so we need to do a comparison. */
2264 for (l = v->locs; l; l = l->next)
2265 if (rtx_equal_for_cselib_p (l->loc, x))
2266 return 1;
2267
2268 return 0;
2269 }
2270
2271 /* The hash function for our hash table. The value is always computed with
2272 hash_rtx when adding an element; this function just extracts the hash
2273 value from a cselib_val structure. */
2274
2275 static unsigned int
2276 get_value_hash (entry)
2277 const void *entry;
2278 {
2279 const cselib_val *v = (const cselib_val *) entry;
2280 return v->value;
2281 }
2282
2283 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2284 only return true for values which point to a cselib_val whose value
2285 element has been set to zero, which implies the cselib_val will be
2286 removed. */
2287
2288 int
2289 references_value_p (x, only_useless)
2290 rtx x;
2291 int only_useless;
2292 {
2293 enum rtx_code code = GET_CODE (x);
2294 const char *fmt = GET_RTX_FORMAT (code);
2295 int i, j;
2296
2297 if (GET_CODE (x) == VALUE
2298 && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
2299 return 1;
2300
2301 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2302 {
2303 if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless))
2304 return 1;
2305 else if (fmt[i] == 'E')
2306 for (j = 0; j < XVECLEN (x, i); j++)
2307 if (references_value_p (XVECEXP (x, i, j), only_useless))
2308 return 1;
2309 }
2310
2311 return 0;
2312 }
2313
2314 /* For all locations found in X, delete locations that reference useless
2315 values (i.e. values without any location). Called through
2316 htab_traverse. */
2317
2318 static int
2319 discard_useless_locs (x, info)
2320 void **x;
2321 void *info ATTRIBUTE_UNUSED;
2322 {
2323 cselib_val *v = (cselib_val *)*x;
2324 struct elt_loc_list **p = &v->locs;
2325 int had_locs = v->locs != 0;
2326
2327 while (*p)
2328 {
2329 if (references_value_p ((*p)->loc, 1))
2330 unchain_one_elt_loc_list (p);
2331 else
2332 p = &(*p)->next;
2333 }
2334
2335 if (had_locs && v->locs == 0)
2336 {
2337 n_useless_values++;
2338 values_became_useless = 1;
2339 }
2340 return 1;
2341 }
2342
2343 /* If X is a value with no locations, remove it from the hashtable. */
2344
2345 static int
2346 discard_useless_values (x, info)
2347 void **x;
2348 void *info ATTRIBUTE_UNUSED;
2349 {
2350 cselib_val *v = (cselib_val *)*x;
2351
2352 if (v->locs == 0)
2353 {
2354 htab_clear_slot (hash_table, x);
2355 unchain_one_value (v);
2356 n_useless_values--;
2357 }
2358
2359 return 1;
2360 }
2361
2362 /* Clean out useless values (i.e. those which no longer have locations
2363 associated with them) from the hash table. */
2364
2365 static void
2366 remove_useless_values ()
2367 {
2368 /* First pass: eliminate locations that reference the value. That in
2369 turn can make more values useless. */
2370 do
2371 {
2372 values_became_useless = 0;
2373 htab_traverse (hash_table, discard_useless_locs, 0);
2374 }
2375 while (values_became_useless);
2376
2377 /* Second pass: actually remove the values. */
2378 htab_traverse (hash_table, discard_useless_values, 0);
2379
2380 if (n_useless_values != 0)
2381 abort ();
2382 }
2383
/* Return nonzero if we can prove that X and Y contain the same value, taking
   our gathered information into account.  Registers and memory references
   are first mapped to their cselib VALUEs, so two syntactically different
   rtxes can compare equal when cselib knows they hold the same value.  */

int
rtx_equal_for_cselib_p (x, y)
     rtx x, y;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  /* Replace REG/MEM operands by their VALUE when one is known; this is
     what lets equivalences gathered by cselib influence the result.  */
  if (GET_CODE (x) == REG || GET_CODE (x) == MEM)
    {
      cselib_val *e = cselib_lookup (x, GET_MODE (x), 0);

      if (e)
	x = e->u.val_rtx;
    }

  if (GET_CODE (y) == REG || GET_CODE (y) == MEM)
    {
      cselib_val *e = cselib_lookup (y, GET_MODE (y), 0);

      if (e)
	y = e->u.val_rtx;
    }

  if (x == y)
    return 1;

  /* Each cselib_val owns a unique VALUE rtx, so comparing the backing
     structures decides equality directly.  */
  if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
    return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);

  /* If only one side is a VALUE, compare the other side against each of
     that VALUE's recorded locations.  */
  if (GET_CODE (x) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (x);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
	{
	  rtx t = l->loc;

	  /* Avoid infinite recursion: a REG/MEM location would be
	     mapped right back to this VALUE at the top of the call.  */
	  if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
	    continue;
	  else if (rtx_equal_for_cselib_p (t, y))
	    return 1;
	}

      return 0;
    }

  if (GET_CODE (y) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (y);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
	{
	  rtx t = l->loc;

	  /* Avoid infinite recursion, as above.  */
	  if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
	    continue;
	  else if (rtx_equal_for_cselib_p (x, t))
	    return 1;
	}

      return 0;
    }

  /* Neither side is a VALUE; fall back to a structural comparison.  */
  if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* This won't be handled correctly by the code below.  */
  if (GET_CODE (x) == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);

  code = GET_CODE (x);
  fmt = GET_RTX_FORMAT (code);

  /* Compare the operands pairwise, interpreting each according to its
     letter in the rtx format string.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j),
					  XVECEXP (y, i, j)))
	      return 0;
	  break;

	case 'e':
	  if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i)))
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	case 't':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
2522
2523 /* We need to pass down the mode of constants through the hash table
2524 functions. For that purpose, wrap them in a CONST of the appropriate
2525 mode. */
2526 static rtx
2527 wrap_constant (mode, x)
2528 enum machine_mode mode;
2529 rtx x;
2530 {
2531 if (GET_CODE (x) != CONST_INT
2532 && (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode))
2533 return x;
2534 if (mode == VOIDmode)
2535 abort ();
2536 return gen_rtx_CONST (mode, x);
2537 }
2538
/* Hash an rtx.  Return 0 if we couldn't hash the rtx.
   For registers and memory locations, we look up their cselib_val structure
   and return its VALUE element.
   Possible reasons for return 0 are: the object is volatile, or we couldn't
   find a register or memory location in the table and CREATE is zero.  If
   CREATE is nonzero, table elts are created for regs and mem.
   MODE is used in hashing for CONST_INTs only;
   otherwise the mode of X is used.  */

static unsigned int
hash_rtx (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  cselib_val *e;
  int i, j;
  enum rtx_code code;
  const char *fmt;
  unsigned int hash = 0;

  /* repeat is used to turn tail-recursion into iteration.  */
 repeat:
  code = GET_CODE (x);
  hash += (unsigned) code + (unsigned) GET_MODE (x);

  switch (code)
    {
    case MEM:
    case REG:
      /* Registers and memory hash through their cselib value, so that
	 locations known to be equivalent hash identically.  */
      e = cselib_lookup (x, GET_MODE (x), create);
      if (! e)
	return 0;

      hash += e->value;
      return hash;

    case CONST_INT:
      /* A CONST_INT is modeless; the caller-supplied MODE stands in so
	 that equal values intended for different modes hash apart.  */
      hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x);
      return hash ? hash : CONST_INT;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned) code + (unsigned) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
	for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
	  hash += XWINT (x, i);
      else
	hash += ((unsigned) CONST_DOUBLE_LOW (x)
		 + (unsigned) CONST_DOUBLE_HIGH (x));
      return hash ? hash : CONST_DOUBLE;

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      hash
	+= ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
      return hash ? hash : LABEL_REF;

    case SYMBOL_REF:
      hash
	+= ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
      return hash ? hash : SYMBOL_REF;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
    case PRE_MODIFY:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      /* These have side effects, so refuse to hash them.  */
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 0;

      break;

    default:
      break;
    }

  /* Hash the operands according to the rtx format string.  */
  i = GET_RTX_LENGTH (code) - 1;
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  rtx tem = XEXP (x, i);
	  unsigned int tem_hash;

	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = tem;
	      goto repeat;
	    }

	  tem_hash = hash_rtx (tem, 0, create);
	  if (tem_hash == 0)
	    return 0;

	  hash += tem_hash;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  {
	    unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create);

	    if (tem_hash == 0)
	      return 0;

	    hash += tem_hash;
	  }
      else if (fmt[i] == 's')
	{
	  const unsigned char *p = (const unsigned char *) XSTR (x, i);

	  if (p)
	    while (*p)
	      hash += *p++;
	}
      else if (fmt[i] == 'i')
	hash += XINT (x, i);
      else if (fmt[i] == '0' || fmt[i] == 't')
	/* unused */;
      else
	abort ();
    }

  /* Never return 0, which means "couldn't hash".  */
  return hash ? hash : 1 + GET_CODE (x);
}
2677
2678 /* Create a new value structure for VALUE and initialize it. The mode of the
2679 value is MODE. */
2680
2681 static cselib_val *
2682 new_cselib_val (value, mode)
2683 unsigned int value;
2684 enum machine_mode mode;
2685 {
2686 cselib_val *e = empty_vals;
2687
2688 if (e)
2689 empty_vals = e->u.next_free;
2690 else
2691 e = (cselib_val *) obstack_alloc (&cselib_obstack, sizeof (cselib_val));
2692
2693 if (value == 0)
2694 abort ();
2695
2696 e->value = value;
2697 e->u.val_rtx = gen_rtx_VALUE (mode);
2698 CSELIB_VAL_PTR (e->u.val_rtx) = e;
2699 e->addr_list = 0;
2700 e->locs = 0;
2701 return e;
2702 }
2703
2704 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2705 contains the data at this address. X is a MEM that represents the
2706 value. Update the two value structures to represent this situation. */
2707
2708 static void
2709 add_mem_for_addr (addr_elt, mem_elt, x)
2710 cselib_val *addr_elt, *mem_elt;
2711 rtx x;
2712 {
2713 rtx new;
2714 struct elt_loc_list *l;
2715
2716 /* Avoid duplicates. */
2717 for (l = mem_elt->locs; l; l = l->next)
2718 if (GET_CODE (l->loc) == MEM
2719 && CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
2720 return;
2721
2722 new = gen_rtx_MEM (GET_MODE (x), addr_elt->u.val_rtx);
2723 MEM_COPY_ATTRIBUTES (new, x);
2724
2725 addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
2726 mem_elt->locs = new_elt_loc_list (mem_elt->locs, new);
2727 }
2728
2729 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2730 If CREATE, make a new one if we haven't seen it before. */
2731
2732 static cselib_val *
2733 cselib_lookup_mem (x, create)
2734 rtx x;
2735 int create;
2736 {
2737 enum machine_mode mode = GET_MODE (x);
2738 void **slot;
2739 cselib_val *addr;
2740 cselib_val *mem_elt;
2741 struct elt_list *l;
2742
2743 if (MEM_VOLATILE_P (x) || mode == BLKmode
2744 || (FLOAT_MODE_P (mode) && flag_float_store))
2745 return 0;
2746
2747 /* Look up the value for the address. */
2748 addr = cselib_lookup (XEXP (x, 0), mode, create);
2749 if (! addr)
2750 return 0;
2751
2752 /* Find a value that describes a value of our mode at that address. */
2753 for (l = addr->addr_list; l; l = l->next)
2754 if (GET_MODE (l->elt->u.val_rtx) == mode)
2755 return l->elt;
2756
2757 if (! create)
2758 return 0;
2759
2760 mem_elt = new_cselib_val (++next_unknown_value, mode);
2761 add_mem_for_addr (addr, mem_elt, x);
2762 slot = htab_find_slot_with_hash (hash_table, wrap_constant (mode, x),
2763 mem_elt->value, INSERT);
2764 *slot = mem_elt;
2765 return mem_elt;
2766 }
2767
/* Walk rtx X and replace all occurrences of REG and MEM subexpressions
   with VALUE expressions.  This way, it becomes independent of changes
   to registers and memory.
   X isn't actually modified; if modifications are needed, new rtl is
   allocated.  However, the return value can share rtl with X.  */

static rtx
cselib_subst_to_values (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  cselib_val *e;
  struct elt_loc_list *l;
  rtx copy = x;
  int i;

  switch (code)
    {
    case REG:
      /* The register must already have a value of its own mode in the
	 table; the loop crashes intentionally (abort below) if not.  */
      for (l = REG_VALUES (REGNO (x)); l; l = l->next)
	if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
	  return l->elt->u.val_rtx;

      abort ();

    case MEM:
      /* Likewise, the MEM must already be known.  */
      e = cselib_lookup_mem (x, 0);
      if (! e)
	abort ();
      return e->u.val_rtx;

      /* CONST_DOUBLEs must be special-cased here so that we won't try to
	 look up the CONST_DOUBLE_MEM inside.  */
    case CONST_DOUBLE:
    case CONST_INT:
      return x;

    default:
      break;
    }

  /* Substitute in every operand, copying X lazily: COPY aliases X
     until the first operand actually changes, at which point a shallow
     copy is made; vector operands get the same treatment.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  rtx t = cselib_subst_to_values (XEXP (x, i));

	  if (t != XEXP (x, i) && x == copy)
	    copy = shallow_copy_rtx (x);

	  XEXP (copy, i) = t;
	}
      else if (fmt[i] == 'E')
	{
	  int j, k;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      rtx t = cselib_subst_to_values (XVECEXP (x, i, j));

	      /* On the first changed element, copy the vector too
		 (and X itself if that hasn't happened yet), carrying
		 over the elements already processed.  */
	      if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i))
		{
		  if (x == copy)
		    copy = shallow_copy_rtx (x);

		  XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i));
		  for (k = 0; k < j; k++)
		    XVECEXP (copy, i, k) = XVECEXP (x, i, k);
		}

	      XVECEXP (copy, i, j) = t;
	    }
	}
    }

  return copy;
}
2846
/* Look up the rtl expression X in our tables and return the value it has.
   If CREATE is zero, we return NULL if we don't know the value.  Otherwise,
   we create a new one if possible, using mode MODE if X doesn't have a mode
   (i.e. because it's a constant).  */

cselib_val *
cselib_lookup (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  void **slot;
  cselib_val *e;
  unsigned int hashval;

  /* MODE only matters when X itself is modeless (a constant).  */
  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A VALUE already carries its cselib_val.  */
  if (GET_CODE (x) == VALUE)
    return CSELIB_VAL_PTR (x);

  if (GET_CODE (x) == REG)
    {
      struct elt_list *l;
      unsigned int i = REGNO (x);

      /* A register can hold a different value in each mode; find the
	 one matching MODE.  */
      for (l = REG_VALUES (i); l; l = l->next)
	if (mode == GET_MODE (l->elt->u.val_rtx))
	  return l->elt;

      if (! create)
	return 0;

      e = new_cselib_val (++next_unknown_value, GET_MODE (x));
      e->locs = new_elt_loc_list (e->locs, x);
      /* Remember which registers were touched so clear_table need not
	 scan them all.  */
      if (REG_VALUES (i) == 0)
	VARRAY_PUSH_UINT (used_regs, i);
      REG_VALUES (i) = new_elt_list (REG_VALUES (i), e);
      slot = htab_find_slot_with_hash (hash_table, x, e->value, INSERT);
      *slot = e;
      return e;
    }

  if (GET_CODE (x) == MEM)
    return cselib_lookup_mem (x, create);

  hashval = hash_rtx (x, mode, create);
  /* Can't even create if hashing is not possible.  */
  if (! hashval)
    return 0;

  slot = htab_find_slot_with_hash (hash_table, wrap_constant (mode, x),
				   hashval, create ? INSERT : NO_INSERT);
  if (slot == 0)
    return 0;

  e = (cselib_val *) *slot;
  if (e)
    return e;

  e = new_cselib_val (hashval, mode);

  /* We have to fill the slot before calling cselib_subst_to_values:
     the hash table is inconsistent until we do so, and
     cselib_subst_to_values will need to do lookups.  */
  *slot = (void *) e;
  e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x));
  return e;
}
2916
/* Invalidate any entries in reg_values that overlap REGNO.  This is called
   if REGNO is changing.  MODE is the mode of the assignment to REGNO, which
   is used to determine how many hard registers are being changed.  If MODE
   is VOIDmode, then only REGNO is being changed; this is used when
   invalidating call clobbered registers across a call.  */

static void
cselib_invalidate_regno (regno, mode)
     unsigned int regno;
     enum machine_mode mode;
{
  unsigned int endregno;
  unsigned int i;

  /* If we see pseudos after reload, something is _wrong_.  */
  if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
      && reg_renumber[regno] >= 0)
    abort ();

  /* Determine the range of registers that must be invalidated.  For
     pseudos, only REGNO is affected.  For hard regs, we must take MODE
     into account, and we must also invalidate lower register numbers
     if they contain values that overlap REGNO.  */
  endregno = regno + 1;
  if (regno < FIRST_PSEUDO_REGISTER && mode != VOIDmode)
    endregno = regno + HARD_REGNO_NREGS (regno, mode);

  /* Start from register 0: a multi-word value that begins below REGNO
     can still extend into the invalidated range.  */
  for (i = 0; i < endregno; i++)
    {
      struct elt_list **l = &REG_VALUES (i);

      /* Go through all known values for this reg; if it overlaps the range
	 we're invalidating, remove the value.  */
      while (*l)
	{
	  cselib_val *v = (*l)->elt;
	  struct elt_loc_list **p;
	  unsigned int this_last = i;

	  /* Last hard register covered by this value.  */
	  if (i < FIRST_PSEUDO_REGISTER)
	    this_last += HARD_REGNO_NREGS (i, GET_MODE (v->u.val_rtx)) - 1;

	  /* No overlap with [REGNO, ENDREGNO); keep the value.  */
	  if (this_last < regno)
	    {
	      l = &(*l)->next;
	      continue;
	    }

	  /* We have an overlap.  */
	  unchain_one_elt_list (l);

	  /* Now, we clear the mapping from value to reg.  It must exist, so
	     this code will crash intentionally if it doesn't.  */
	  for (p = &v->locs; ; p = &(*p)->next)
	    {
	      rtx x = (*p)->loc;

	      if (GET_CODE (x) == REG && REGNO (x) == i)
		{
		  unchain_one_elt_loc_list (p);
		  break;
		}
	    }

	  /* Values that lost their last location are counted so they
	     can be reclaimed by remove_useless_values.  */
	  if (v->locs == 0)
	    n_useless_values++;
	}
    }
}
2985
2986 /* The memory at address MEM_BASE is being changed.
2987 Return whether this change will invalidate VAL. */
2988
2989 static int
2990 cselib_mem_conflict_p (mem_base, val)
2991 rtx mem_base;
2992 rtx val;
2993 {
2994 enum rtx_code code;
2995 const char *fmt;
2996 int i, j;
2997
2998 code = GET_CODE (val);
2999 switch (code)
3000 {
3001 /* Get rid of a few simple cases quickly. */
3002 case REG:
3003 case PC:
3004 case CC0:
3005 case SCRATCH:
3006 case CONST:
3007 case CONST_INT:
3008 case CONST_DOUBLE:
3009 case SYMBOL_REF:
3010 case LABEL_REF:
3011 return 0;
3012
3013 case MEM:
3014 if (GET_MODE (mem_base) == BLKmode
3015 || GET_MODE (val) == BLKmode
3016 || anti_dependence (val, mem_base))
3017 return 1;
3018
3019 /* The address may contain nested MEMs. */
3020 break;
3021
3022 default:
3023 break;
3024 }
3025
3026 fmt = GET_RTX_FORMAT (code);
3027 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3028 {
3029 if (fmt[i] == 'e')
3030 {
3031 if (cselib_mem_conflict_p (mem_base, XEXP (val, i)))
3032 return 1;
3033 }
3034 else if (fmt[i] == 'E')
3035 for (j = 0; j < XVECLEN (val, i); j++)
3036 if (cselib_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
3037 return 1;
3038 }
3039
3040 return 0;
3041 }
3042
/* For the value found in SLOT, walk its locations to determine if any overlap
   INFO (which is a MEM rtx).  Each overlapping MEM location is removed,
   together with the back-link from its address value's addr_list.
   Called through htab_traverse; always returns 1 to continue.  */

static int
cselib_invalidate_mem_1 (slot, info)
     void **slot;
     void *info;
{
  cselib_val *v = (cselib_val *) *slot;
  rtx mem_rtx = (rtx) info;
  struct elt_loc_list **p = &v->locs;
  int had_locs = v->locs != 0;

  while (*p)
    {
      rtx x = (*p)->loc;
      cselib_val *addr;
      struct elt_list **mem_chain;

      /* MEMs may occur in locations only at the top level; below
	 that every MEM or REG is substituted by its VALUE.  */
      if (GET_CODE (x) != MEM
	  || ! cselib_mem_conflict_p (mem_rtx, x))
	{
	  p = &(*p)->next;
	  continue;
	}

      /* This one overlaps.  */
      /* We must have a mapping from this MEM's address to the
	 value (E).  Remove that, too.  */
      addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0);
      mem_chain = &addr->addr_list;
      /* This loop crashes intentionally if the back-link is missing.  */
      for (;;)
	{
	  if ((*mem_chain)->elt == v)
	    {
	      unchain_one_elt_list (mem_chain);
	      break;
	    }

	  mem_chain = &(*mem_chain)->next;
	}

      unchain_one_elt_loc_list (p);
    }

  /* Values that lost their last location are counted so they can be
     reclaimed by remove_useless_values.  */
  if (had_locs && v->locs == 0)
    n_useless_values++;

  return 1;
}
3095
3096 /* Invalidate any locations in the table which are changed because of a
3097 store to MEM_RTX. If this is called because of a non-const call
3098 instruction, MEM_RTX is (mem:BLK const0_rtx). */
3099
3100 static void
3101 cselib_invalidate_mem (mem_rtx)
3102 rtx mem_rtx;
3103 {
3104 htab_traverse (hash_table, cselib_invalidate_mem_1, mem_rtx);
3105 }
3106
3107 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3108 the third parameter exist so that this function can be passed to
3109 note_stores; they are ignored. */
3110
3111 static void
3112 cselib_invalidate_rtx (dest, ignore, data)
3113 rtx dest;
3114 rtx ignore ATTRIBUTE_UNUSED;
3115 void *data ATTRIBUTE_UNUSED;
3116 {
3117 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT
3118 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG)
3119 dest = XEXP (dest, 0);
3120
3121 if (GET_CODE (dest) == REG)
3122 cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
3123 else if (GET_CODE (dest) == MEM)
3124 cselib_invalidate_mem (dest);
3125
3126 /* Some machines don't define AUTO_INC_DEC, but they still use push
3127 instructions. We need to catch that case here in order to
3128 invalidate the stack pointer correctly. Note that invalidating
3129 the stack pointer is different from invalidating DEST. */
3130 if (push_operand (dest, GET_MODE (dest)))
3131 cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
3132 }
3133
3134 /* Record the result of a SET instruction. DEST is being set; the source
3135 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3136 describes its address. */
3137
3138 static void
3139 cselib_record_set (dest, src_elt, dest_addr_elt)
3140 rtx dest;
3141 cselib_val *src_elt, *dest_addr_elt;
3142 {
3143 int dreg = GET_CODE (dest) == REG ? (int) REGNO (dest) : -1;
3144
3145 if (src_elt == 0 || side_effects_p (dest))
3146 return;
3147
3148 if (dreg >= 0)
3149 {
3150 if (REG_VALUES (dreg) == 0)
3151 VARRAY_PUSH_UINT (used_regs, dreg);
3152
3153 REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt);
3154 if (src_elt->locs == 0)
3155 n_useless_values--;
3156 src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
3157 }
3158 else if (GET_CODE (dest) == MEM && dest_addr_elt != 0)
3159 {
3160 if (src_elt->locs == 0)
3161 n_useless_values--;
3162 add_mem_for_addr (dest_addr_elt, src_elt, dest);
3163 }
3164 }
3165
/* Describe a single set that is part of an insn.  */
struct set
{
  rtx src;			/* The SET_SRC of the set.  */
  rtx dest;			/* The SET_DEST of the set.  */
  cselib_val *src_elt;		/* The value of the source.  */
  cselib_val *dest_addr_elt;	/* If DEST is a MEM, the value of its
				   address; otherwise unused.  */
};
3174
3175 /* There is no good way to determine how many elements there can be
3176 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3177 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3178
3179 /* Record the effects of any sets in INSN. */
3180 static void
3181 cselib_record_sets (insn)
3182 rtx insn;
3183 {
3184 int n_sets = 0;
3185 int i;
3186 struct set sets[MAX_SETS];
3187 rtx body = PATTERN (insn);
3188
3189 body = PATTERN (insn);
3190 /* Find all sets. */
3191 if (GET_CODE (body) == SET)
3192 {
3193 sets[0].src = SET_SRC (body);
3194 sets[0].dest = SET_DEST (body);
3195 n_sets = 1;
3196 }
3197 else if (GET_CODE (body) == PARALLEL)
3198 {
3199 /* Look through the PARALLEL and record the values being
3200 set, if possible. Also handle any CLOBBERs. */
3201 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
3202 {
3203 rtx x = XVECEXP (body, 0, i);
3204
3205 if (GET_CODE (x) == SET)
3206 {
3207 sets[n_sets].src = SET_SRC (x);
3208 sets[n_sets].dest = SET_DEST (x);
3209 n_sets++;
3210 }
3211 }
3212 }
3213
3214 /* Look up the values that are read. Do this before invalidating the
3215 locations that are written. */
3216 for (i = 0; i < n_sets; i++)
3217 {
3218 rtx dest = sets[i].dest;
3219
3220 /* A STRICT_LOW_PART can be ignored; we'll record the equivalence for
3221 the low part after invalidating any knowledge about larger modes. */
3222 if (GET_CODE (sets[i].dest) == STRICT_LOW_PART)
3223 sets[i].dest = dest = XEXP (dest, 0);
3224
3225 /* We don't know how to record anything but REG or MEM. */
3226 if (GET_CODE (dest) == REG || GET_CODE (dest) == MEM)
3227 {
3228 sets[i].src_elt = cselib_lookup (sets[i].src, GET_MODE (dest), 1);
3229 if (GET_CODE (dest) == MEM)
3230 sets[i].dest_addr_elt = cselib_lookup (XEXP (dest, 0), Pmode, 1);
3231 else
3232 sets[i].dest_addr_elt = 0;
3233 }
3234 }
3235
3236 /* Invalidate all locations written by this insn. Note that the elts we
3237 looked up in the previous loop aren't affected, just some of their
3238 locations may go away. */
3239 note_stores (body, cselib_invalidate_rtx, NULL);
3240
3241 /* Now enter the equivalences in our tables. */
3242 for (i = 0; i < n_sets; i++)
3243 {
3244 rtx dest = sets[i].dest;
3245 if (GET_CODE (dest) == REG || GET_CODE (dest) == MEM)
3246 cselib_record_set (dest, sets[i].src_elt, sets[i].dest_addr_elt);
3247 }
3248 }
3249
3250 /* Record the effects of INSN. */
3251
3252 void
3253 cselib_process_insn (insn)
3254 rtx insn;
3255 {
3256 int i;
3257 rtx x;
3258
3259 cselib_current_insn = insn;
3260
3261 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3262 if (GET_CODE (insn) == CODE_LABEL
3263 || (GET_CODE (insn) == NOTE
3264 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
3265 || (GET_CODE (insn) == INSN
3266 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
3267 && MEM_VOLATILE_P (PATTERN (insn))))
3268 {
3269 clear_table (0);
3270 return;
3271 }
3272
3273 if (! INSN_P (insn))
3274 {
3275 cselib_current_insn = 0;
3276 return;
3277 }
3278
3279 /* If this is a call instruction, forget anything stored in a
3280 call clobbered register, or, if this is not a const call, in
3281 memory. */
3282 if (GET_CODE (insn) == CALL_INSN)
3283 {
3284 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3285 if (call_used_regs[i])
3286 cselib_invalidate_regno (i, VOIDmode);
3287
3288 if (! CONST_CALL_P (insn))
3289 cselib_invalidate_mem (callmem);
3290 }
3291
3292 cselib_record_sets (insn);
3293
3294 #ifdef AUTO_INC_DEC
3295 /* Clobber any registers which appear in REG_INC notes. We
3296 could keep track of the changes to their values, but it is
3297 unlikely to help. */
3298 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
3299 if (REG_NOTE_KIND (x) == REG_INC)
3300 cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);
3301 #endif
3302
3303 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3304 after we have processed the insn. */
3305 if (GET_CODE (insn) == CALL_INSN)
3306 for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
3307 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
3308 cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL);
3309
3310 cselib_current_insn = 0;
3311
3312 if (n_useless_values > MAX_USELESS_VALUES)
3313 remove_useless_values ();
3314 }
3315
3316 /* Make sure our varrays are big enough. Not called from any cselib routines;
3317 it must be called by the user if it allocated new registers. */
3318
3319 void
3320 cselib_update_varray_sizes ()
3321 {
3322 unsigned int nregs = max_reg_num ();
3323
3324 if (nregs == cselib_nregs)
3325 return;
3326
3327 cselib_nregs = nregs;
3328 VARRAY_GROW (reg_values, nregs);
3329 VARRAY_GROW (used_regs, nregs);
3330 }
3331
/* Initialize cselib for one pass.  The caller must also call
   init_alias_analysis.  */

void
cselib_init ()
{
  /* These are only created once.  */
  if (! callmem)
    {
      gcc_obstack_init (&cselib_obstack);
      /* Mark the bottom of the obstack so clear_table can release
	 everything allocated after this point in one obstack_free.  */
      cselib_startobj = obstack_alloc (&cselib_obstack, 0);

      /* A BLKmode MEM at address 0; it conflicts with all tracked
	 memory and is used to invalidate across non-const calls.
	 Registered as a GC root since it lives across passes.  */
      callmem = gen_rtx_MEM (BLKmode, const0_rtx);
      ggc_add_rtx_root (&callmem, 1);
    }

  /* Per-pass state: register-value varrays and the value hash table,
     all freed again by cselib_finish.  */
  cselib_nregs = max_reg_num ();
  VARRAY_ELT_LIST_INIT (reg_values, cselib_nregs, "reg_values");
  VARRAY_UINT_INIT (used_regs, cselib_nregs, "used_regs");
  hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL);
  clear_table (1);
}
3354
/* Called when the current user is done with cselib.  Releases the
   per-pass state created by cselib_init: the obstack contents (via
   clear_table), both varrays, and the hash table.  The obstack itself
   and CALLMEM persist for the next pass.  */

void
cselib_finish ()
{
  clear_table (0);
  VARRAY_FREE (reg_values);
  VARRAY_FREE (used_regs);
  htab_delete (hash_table);
}