1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 #include "config.h"
23 /* Must precede rtl.h for FFS. */
24 #include <stdio.h>
25
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "flags.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "recog.h"
33
34 #include <setjmp.h>
35
36 /* The basic idea of common subexpression elimination is to go
37 through the code, keeping a record of expressions that would
38 have the same value at the current scan point, and replacing
39 expressions encountered with the cheapest equivalent expression.
40
41 It is too complicated to keep track of the different possibilities
42 when control paths merge; so, at each label, we forget all that is
43 known and start fresh. This can be described as processing each
44 basic block separately. Note, however, that these are not quite
45 the same as the basic blocks found by a later pass and used for
46 data flow analysis and register packing. We do not need to start fresh
47 after a conditional jump instruction if there is no label there.
48
49 We use two data structures to record the equivalent expressions:
50 a hash table for most expressions, and several vectors together
51 with "quantity numbers" to record equivalent (pseudo) registers.
52
53 The use of the special data structure for registers is desirable
54 because it is faster. It is possible because register references
55 contain a fairly small number, the register number, taken from
56 a contiguously allocated series, and two register references are
57 identical if they have the same number. General expressions
58 do not have any such thing, so the only way to retrieve the
59 information recorded on an expression other than a register
60 is to keep it in a hash table.
61
62 Registers and "quantity numbers":
63
64 At the start of each basic block, all of the (hardware and pseudo)
65 registers used in the function are given distinct quantity
66 numbers to indicate their contents. During scan, when the code
67 copies one register into another, we copy the quantity number.
68 When a register is loaded in any other way, we allocate a new
69 quantity number to describe the value generated by this operation.
70 `reg_qty' records what quantity a register is currently thought
71 of as containing.
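For example (an illustration; the register numbers are invented): after
(set (reg 103) (reg 101)), reg_qty[103] is copied from reg_qty[101] and
both registers share one quantity, whereas loading (reg 103) from
memory would allocate a fresh quantity for it.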
72
73 All real quantity numbers are greater than or equal to `max_reg'.
74 If register N has not been assigned a quantity, reg_qty[N] will equal N.
75
76 Quantity numbers below `max_reg' do not exist and none of the `qty_...'
77 variables should be referenced with an index below `max_reg'.
78
79 We also maintain a bidirectional chain of registers for each
80 quantity number. `qty_first_reg', `qty_last_reg',
81 `reg_next_eqv' and `reg_prev_eqv' hold these chains.
82
83 The first register in a chain is the one whose lifespan is least local.
84 Among equals, it is the one that was seen first.
85 We replace any equivalent register with that one.
86
87 If two registers have the same quantity number, REG expressions for
88 both registers, in the quantity's `qty_mode', must be in the hash
89 table and must be in the same class.
90
91 The converse is not true. Since hard registers may be referenced in
92 any mode, two REG expressions might be equivalent in the hash table
93 but not have the same quantity number if the mode of one register's
94 quantity differs from the mode of those expressions.
95
96 Constants and quantity numbers
97
98 When a quantity has a known constant value, that value is stored
99 in the appropriate element of qty_const. This is in addition to
100 putting the constant in the hash table as is usual for non-regs.
101
102 Whether a reg or a constant is preferred is determined by the configuration
103 macro CONST_COSTS and will often depend on the constant value. In any
104 event, expressions containing constants can be simplified, by fold_rtx.
105
106 When a quantity has a known nearly constant value (such as an address
107 of a stack slot), that value is stored in the appropriate element
108 of qty_const.
109
110 Integer constants don't have a machine mode. However, cse
111 determines the intended machine mode from the destination
112 of the instruction that moves the constant. The machine mode
113 is recorded in the hash table along with the actual RTL
114 constant expression so that different modes are kept separate.
115
116 Other expressions:
117
118 To record known equivalences among expressions in general
119 we use a hash table called `table'. It has a fixed number of buckets
120 that contain chains of `struct table_elt' elements for expressions.
121 These chains connect the elements whose expressions have the same
122 hash codes.
123
124 Other chains through the same elements connect the elements which
125 currently have equivalent values.
126
127 Register references in an expression are canonicalized before hashing
128 the expression. This is done using `reg_qty' and `qty_first_reg'.
129 The hash code of a register reference is computed using the quantity
130 number, not the register number.
131
132 When the value of an expression changes, it is necessary to remove from the
133 hash table not just that expression but all expressions whose values
134 could be different as a result.
135
136 1. If the value changing is in memory, except in special cases
137 ANYTHING referring to memory could be changed. That is because
138 nobody knows where a pointer does not point.
139 The function `invalidate_memory' removes what is necessary.
140
141 The special cases are when the address is constant or is
142 a constant plus a fixed register such as the frame pointer
143 or a static chain pointer. When such addresses are stored in,
144 we can tell exactly which other such addresses must be invalidated
145 due to overlap. `invalidate' does this.
146 All expressions that refer to non-constant
147 memory addresses are also invalidated. `invalidate_memory' does this.
148
149 2. If the value changing is a register, all expressions
150 containing references to that register, and only those,
151 must be removed.
152
153 Because searching the entire hash table for expressions that contain
154 a register is very slow, we try to figure out when it isn't necessary.
155 Precisely, this is necessary only when expressions have been
156 entered in the hash table using this register, and then the value has
157 changed, and then another expression wants to be added to refer to
158 the register's new value. This sequence of circumstances is rare
159 within any one basic block.
160
161 The vectors `reg_tick' and `reg_in_table' are used to detect this case.
162 reg_tick[i] is incremented whenever a value is stored in register i.
163 reg_in_table[i] holds -1 if no references to register i have been
164 entered in the table; otherwise, it contains the value reg_tick[i] had
165 when the references were entered. If we want to enter a reference
166 and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
167 Until we want to enter a new entry, the mere fact that the two vectors
168 don't match causes such entries to be ignored if anyone tries to match them.
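As a concrete illustration (not from the original comment): an
expression mentioning register 5 entered while reg_tick[5] == 2 sets
reg_in_table[5] to 2; a later store makes reg_tick[5] == 3, and the
stale entries are purged only when a new expression mentioning
register 5 is about to be entered.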
169
170 Registers themselves are entered in the hash table as well as in
171 the equivalent-register chains. However, the vectors `reg_tick'
172 and `reg_in_table' do not apply to expressions which are simple
173 register references. These expressions are removed from the table
174 immediately when they become invalid, and this can be done even if
175 we do not immediately search for all the expressions that refer to
176 the register.
177
178 A CLOBBER rtx in an instruction invalidates its operand for further
179 reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
180 invalidates everything that resides in memory.
181
182 Related expressions:
183
184 Constant expressions that differ only by an additive integer
185 are called related. When a constant expression is put in
186 the table, the related expression with no constant term
187 is also entered. These are made to point at each other
188 so that it is possible to find out if there exists any
189 register equivalent to an expression related to a given expression. */
190
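/* An illustration of related expressions (not part of the original
   comments): (const (plus (symbol_ref ("x")) (const_int 4))) and
   (symbol_ref ("x")) are related. If some register is known to hold
   the bare symbol, the offset form can be computed from that register
   with a single add. */
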
191 /* One plus largest register number used in this function. */
192
193 static int max_reg;
194
195 /* Length of vectors indexed by quantity number.
196 We know in advance we will not need a quantity number this big. */
197
198 static int max_qty;
199
200 /* Next quantity number to be allocated.
201 This is 1 + the largest number needed so far. */
202
203 static int next_qty;
204
205 /* Indexed by quantity number, gives the first (or last) register
206 in the chain of registers that currently contain this quantity. */
207
208 static int *qty_first_reg;
209 static int *qty_last_reg;
210
211 /* Indexed by quantity number, gives the mode of the quantity. */
212
213 static enum machine_mode *qty_mode;
214
215 /* Indexed by quantity number, gives the rtx of the constant value of the
216 quantity, or zero if it does not have a known value.
217 A sum of the frame pointer (or arg pointer) plus a constant
218 can also be entered here. */
219
220 static rtx *qty_const;
221
222 /* Indexed by qty number, gives the insn that stored the constant value
223 recorded in `qty_const'. */
224
225 static rtx *qty_const_insn;
226
227 /* The next three variables are used to track when a comparison between a
228 quantity and some constant or register has been passed. In that case, we
229 know the results of the comparison in case we see it again. These variables
230 record a comparison that is known to be true. */
231
232 /* Indexed by qty number, gives the rtx code of a comparison with a known
233 result involving this quantity. If none, it is UNKNOWN. */
234 static enum rtx_code *qty_comparison_code;
235
236 /* Indexed by qty number, gives the constant being compared against in a
237 comparison of known result. If no such comparison, it is undefined.
238 If the comparison is not with a constant, it is zero. */
239
240 static rtx *qty_comparison_const;
241
242 /* Indexed by qty number, gives the quantity being compared against in a
243 comparison of known result. If no such comparison, it is undefined.
244 If the comparison is not with a register, it is -1. */
245
246 static int *qty_comparison_qty;
247
248 #ifdef HAVE_cc0
249 /* For machines that have a CC0, we do not record its value in the hash
250 table since its use is guaranteed to be the insn immediately following
251 its definition and any other insn is presumed to invalidate it.
252
253 Instead, we store below the value last assigned to CC0. If it should
254 happen to be a constant, it is stored in preference to the actual
255 assigned value. In case it is a constant, we store the mode in which
256 the constant should be interpreted. */
257
258 static rtx prev_insn_cc0;
259 static enum machine_mode prev_insn_cc0_mode;
260 #endif
261
262 /* Previous actual insn. 0 if at first insn of basic block. */
263
264 static rtx prev_insn;
265
266 /* Insn being scanned. */
267
268 static rtx this_insn;
269
270 /* Indexed by register number, gives the quantity number
271 of the register's current contents. */
272
273 static int *reg_qty;
274
275 /* Indexed by register number, gives the number of the next (or
276 previous) register in the chain of registers sharing the same
277 value.
278
279 Or -1 if this register is at the end of the chain.
280
281 If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
282
283 static int *reg_next_eqv;
284 static int *reg_prev_eqv;
285
286 /* Indexed by register number, gives the number of times
287 that register has been altered in the current basic block. */
288
289 static int *reg_tick;
290
291 /* Indexed by register number, gives the reg_tick value at which
292 rtx's containing this register are valid in the hash table.
293 If this does not equal the current reg_tick value, such expressions
294 existing in the hash table are invalid.
295 If this is -1, no expressions containing this register have been
296 entered in the table. */
297
298 static int *reg_in_table;
299
300 /* A HARD_REG_SET containing all the hard registers for which there is
301 currently a REG expression in the hash table. Note the difference
302 from the above variables, which indicate if the REG is mentioned in some
303 expression in the table. */
304
305 static HARD_REG_SET hard_regs_in_table;
306
307 /* A HARD_REG_SET containing all the hard registers that are invalidated
308 by a CALL_INSN. */
309
310 static HARD_REG_SET regs_invalidated_by_call;
311
312 /* Two vectors of ints:
313 one containing max_reg elements all set to -1; the other containing
314 max_reg + 500 (an approximation of max_qty) elements, where element i
315 contains i. These are used to initialize various other vectors fast. */
316
317 static int *all_minus_one;
318 static int *consec_ints;
319
320 /* CUID of insn that starts the basic block currently being cse-processed. */
321
322 static int cse_basic_block_start;
323
324 /* CUID of insn that ends the basic block currently being cse-processed. */
325
326 static int cse_basic_block_end;
327
328 /* Vector mapping INSN_UIDs to cuids.
329 The cuids are like uids but always increase monotonically.
330 We use them to see whether a reg is used outside a given basic block. */
331
332 static int *uid_cuid;
333
334 /* Highest UID in UID_CUID. */
335 static int max_uid;
336
337 /* Get the cuid of an insn. */
338
339 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
340
341 /* Nonzero if cse has altered conditional jump insns
342 in such a way that jump optimization should be redone. */
343
344 static int cse_jumps_altered;
345
346 /* Nonzero if we put a LABEL_REF into the hash table. Since we may have put
347 it into an INSN without a REG_LABEL, we have to rerun jump after CSE
348 to put in the note. */
349 static int recorded_label_ref;
350
351 /* canon_hash stores 1 in do_not_record
352 if it notices a reference to CC0, PC, or some other volatile
353 subexpression. */
354
355 static int do_not_record;
356
357 #ifdef LOAD_EXTEND_OP
358
359 /* Scratch rtl used when looking for load-extended copy of a MEM. */
360 static rtx memory_extend_rtx;
361 #endif
362
363 /* canon_hash stores 1 in hash_arg_in_memory
364 if it notices a reference to memory within the expression being hashed. */
365
366 static int hash_arg_in_memory;
367
368 /* canon_hash stores 1 in hash_arg_in_struct
369 if it notices a reference to memory that's part of a structure. */
370
371 static int hash_arg_in_struct;
372
373 /* The hash table contains buckets which are chains of `struct table_elt's,
374 each recording one expression's information.
375 That expression is in the `exp' field.
376
377 Those elements with the same hash code are chained in both directions
378 through the `next_same_hash' and `prev_same_hash' fields.
379
380 Each set of expressions with equivalent values
381 are on a two-way chain through the `next_same_value'
382 and `prev_same_value' fields, and all point with
383 the `first_same_value' field at the first element in
384 that chain. The chain is in order of increasing cost.
385 Each element's cost value is in its `cost' field.
386
387 The `in_memory' field is nonzero for elements that
388 involve any reference to memory. These elements are removed
389 whenever a write is done to an unidentified location in memory.
390 To be safe, we assume that a memory address is unidentified unless
391 the address is either a symbol constant or a constant plus
392 the frame pointer or argument pointer.
393
394 The `in_struct' field is nonzero for elements that
395 involve any reference to memory inside a structure or array.
396
397 The `related_value' field is used to connect related expressions
398 (that differ by adding an integer).
399 The related expressions are chained in a circular fashion.
400 `related_value' is zero for expressions for which this
401 chain is not useful.
402
403 The `cost' field stores the cost of this element's expression.
404
405 The `is_const' flag is set if the element is a constant (including
406 a fixed address).
407
408 The `flag' field is used as a temporary during some search routines.
409
410 The `mode' field is usually the same as GET_MODE (`exp'), but
411 if `exp' is a CONST_INT and has no machine mode then the `mode'
412 field is the mode it was being used as. Each constant is
413 recorded separately for each mode it is used with. */
414
415
416 struct table_elt
417 {
418 rtx exp;
419 struct table_elt *next_same_hash;
420 struct table_elt *prev_same_hash;
421 struct table_elt *next_same_value;
422 struct table_elt *prev_same_value;
423 struct table_elt *first_same_value;
424 struct table_elt *related_value;
425 int cost;
426 enum machine_mode mode;
427 char in_memory;
428 char in_struct;
429 char is_const;
430 char flag;
431 };
432
433 /* We don't want a lot of buckets, because we rarely have very many
434 things stored in the hash table, and a lot of buckets slows
435 down a lot of loops that happen frequently. */
436 #define NBUCKETS 31
437
438 /* Compute hash code of X in mode M. Special-case the case where X is a pseudo
439 register (hard registers may require `do_not_record' to be set). */
440
441 #define HASH(X, M) \
442 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
443 ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
444 : canon_hash (X, M) % NBUCKETS)
445
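/* Note (an illustration, not part of the original comments): for a
   pseudo register only reg_qty[REGNO (X)] enters the hash, so all
   registers sharing a quantity land in the same bucket and can be
   found under one hash code, matching the canonicalization described
   in the header comment. */
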
446 /* Determine whether register number N is considered a fixed register for CSE.
447 It is desirable to replace other regs with fixed regs, to reduce need for
448 non-fixed hard regs.
449 A reg wins if it is either the frame pointer or designated as fixed,
450 but not if it is an overlapping register. */
451 #ifdef OVERLAPPING_REGNO_P
452 #define FIXED_REGNO_P(N) \
453 (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
454 || fixed_regs[N] || global_regs[N]) \
455 && ! OVERLAPPING_REGNO_P ((N)))
456 #else
457 #define FIXED_REGNO_P(N) \
458 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
459 || fixed_regs[N] || global_regs[N])
460 #endif
461
462 /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
463 hard registers and pointers into the frame are the cheapest with a cost
464 of 0. Next come pseudos with a cost of 1 and other hard registers with
465 a cost of 2. Aside from these special cases, call `rtx_cost'. */
466
467 #define CHEAP_REGNO(N) \
468 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
469 || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
470 || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
471 || ((N) < FIRST_PSEUDO_REGISTER \
472 && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
473
474 /* A register is cheap if it is a user variable assigned to the register
475 or if its register number always corresponds to a cheap register. */
476
477 #define CHEAP_REG(N) \
478 ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
479 || CHEAP_REGNO (REGNO (N)))
480
481 #define COST(X) \
482 (GET_CODE (X) == REG \
483 ? (CHEAP_REG (X) ? 0 \
484 : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
485 : 2) \
486 : notreg_cost(X))
487
488 /* Determine if the quantity number for register number N represents a valid
489 index into the `qty_...' variables. */
490
491 #define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
492
493 static struct table_elt *table[NBUCKETS];
494
495 /* Chain of `struct table_elt's made so far for this function
496 but currently removed from the table. */
497
498 static struct table_elt *free_element_chain;
499
500 /* Number of `struct table_elt' structures made so far for this function. */
501
502 static int n_elements_made;
503
504 /* Maximum value `n_elements_made' has had so far in this compilation
505 for functions previously processed. */
506
507 static int max_elements_made;
508
509 /* Surviving equivalence class when two equivalence classes are merged
510 by recording the effects of a jump in the last insn. Zero if the
511 last insn was not a conditional jump. */
512
513 static struct table_elt *last_jump_equiv_class;
514
515 /* Set to the cost of a constant pool reference if one was found for a
516 symbolic constant. If this was found, it means we should try to
517 convert constants into constant pool entries if they don't fit in
518 the insn. */
519
520 static int constant_pool_entries_cost;
521
522 /* Bits describing what kind of values in memory must be invalidated
523 for a particular instruction. If none of the bits is set,
524 no memory refs need to be invalidated. Each bit is more powerful
525 than the preceding ones, and if a bit is set then the preceding
526 bits are also set.
527
528 Here is how the bits are set:
529 Pushing onto the stack invalidates only the stack pointer,
530 writing at a fixed address invalidates only variable addresses,
531 writing in a structure element at variable address
532 invalidates all but scalar variables,
533 and writing in anything else at variable address invalidates everything. */
534
535 struct write_data
536 {
537 int sp : 1; /* Invalidate stack pointer. */
538 int var : 1; /* Invalidate variable addresses. */
539 int nonscalar : 1; /* Invalidate all but scalar variables. */
540 int all : 1; /* Invalidate all memory refs. */
541 };
542
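/* For instance (an illustration, not from the original comments): a push
   through (pre_dec (reg sp)) sets only `sp'; a store to
   (mem (symbol_ref ("a"))) sets `var'; a structure store at a varying
   address sets `nonscalar' (and, per the rule above, the weaker bits);
   any other store at a varying address sets `all'. */
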
543 /* Define maximum length of a branch path. */
544
545 #define PATHLENGTH 10
546
547 /* This data describes a block that will be processed by cse_basic_block. */
548
549 struct cse_basic_block_data {
550 /* Lowest CUID value of insns in block. */
551 int low_cuid;
552 /* Highest CUID value of insns in block. */
553 int high_cuid;
554 /* Total number of SETs in block. */
555 int nsets;
556 /* Last insn in the block. */
557 rtx last;
558 /* Size of current branch path, if any. */
559 int path_size;
560 /* Current branch path, indicating which branches will be taken. */
561 struct branch_path {
562 /* The branch insn. */
563 rtx branch;
564 /* Whether it should be taken or not. AROUND is the same as taken
565 except that it is used when the destination label is not preceded
566 by a BARRIER. */
567 enum taken {TAKEN, NOT_TAKEN, AROUND} status;
568 } path[PATHLENGTH];
569 };
570
571 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
572 virtual regs here because the simplify_*_operation routines are called
573 by integrate.c, which is called before virtual register instantiation. */
574
575 #define FIXED_BASE_PLUS_P(X) \
576 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
577 || (X) == arg_pointer_rtx \
578 || (X) == virtual_stack_vars_rtx \
579 || (X) == virtual_incoming_args_rtx \
580 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
581 && (XEXP (X, 0) == frame_pointer_rtx \
582 || XEXP (X, 0) == hard_frame_pointer_rtx \
583 || XEXP (X, 0) == arg_pointer_rtx \
584 || XEXP (X, 0) == virtual_stack_vars_rtx \
585 || XEXP (X, 0) == virtual_incoming_args_rtx)))
586
587 /* Similar, but also allows reference to the stack pointer.
588
589 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
590 arg_pointer_rtx by itself is nonzero, because on at least one machine,
591 the i960, the arg pointer is zero when it is unused. */
592
593 #define NONZERO_BASE_PLUS_P(X) \
594 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
595 || (X) == virtual_stack_vars_rtx \
596 || (X) == virtual_incoming_args_rtx \
597 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
598 && (XEXP (X, 0) == frame_pointer_rtx \
599 || XEXP (X, 0) == hard_frame_pointer_rtx \
600 || XEXP (X, 0) == arg_pointer_rtx \
601 || XEXP (X, 0) == virtual_stack_vars_rtx \
602 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
603 || (X) == stack_pointer_rtx \
604 || (X) == virtual_stack_dynamic_rtx \
605 || (X) == virtual_outgoing_args_rtx \
606 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
607 && (XEXP (X, 0) == stack_pointer_rtx \
608 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
609 || XEXP (X, 0) == virtual_outgoing_args_rtx)))
610
611 static int notreg_cost PROTO((rtx));
612 static void new_basic_block PROTO((void));
613 static void make_new_qty PROTO((int));
614 static void make_regs_eqv PROTO((int, int));
615 static void delete_reg_equiv PROTO((int));
616 static int mention_regs PROTO((rtx));
617 static int insert_regs PROTO((rtx, struct table_elt *, int));
618 static void free_element PROTO((struct table_elt *));
619 static void remove_from_table PROTO((struct table_elt *, unsigned));
620 static struct table_elt *get_element PROTO((void));
621 static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
622 *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
623 static rtx lookup_as_function PROTO((rtx, enum rtx_code));
624 static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
625 enum machine_mode));
626 static void merge_equiv_classes PROTO((struct table_elt *,
627 struct table_elt *));
628 static void invalidate PROTO((rtx, enum machine_mode));
629 static void remove_invalid_refs PROTO((int));
630 static void rehash_using_reg PROTO((rtx));
631 static void invalidate_memory PROTO((struct write_data *));
632 static void invalidate_for_call PROTO((void));
633 static rtx use_related_value PROTO((rtx, struct table_elt *));
634 static unsigned canon_hash PROTO((rtx, enum machine_mode));
635 static unsigned safe_hash PROTO((rtx, enum machine_mode));
636 static int exp_equiv_p PROTO((rtx, rtx, int, int));
637 static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
638 HOST_WIDE_INT *,
639 HOST_WIDE_INT *));
640 static int refers_to_p PROTO((rtx, rtx));
641 static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
642 HOST_WIDE_INT));
643 static int cse_rtx_addr_varies_p PROTO((rtx));
644 static rtx canon_reg PROTO((rtx, rtx));
645 static void find_best_addr PROTO((rtx, rtx *));
646 static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
647 enum machine_mode *,
648 enum machine_mode *));
649 static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
650 rtx, rtx));
651 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
652 rtx, rtx));
653 static rtx fold_rtx PROTO((rtx, rtx));
654 static rtx equiv_constant PROTO((rtx));
655 static void record_jump_equiv PROTO((rtx, int));
656 static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
657 rtx, rtx, int));
658 static void cse_insn PROTO((rtx, int));
659 static void note_mem_written PROTO((rtx, struct write_data *));
660 static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
661 static rtx cse_process_notes PROTO((rtx, rtx));
662 static void cse_around_loop PROTO((rtx));
663 static void invalidate_skipped_set PROTO((rtx, rtx));
664 static void invalidate_skipped_block PROTO((rtx));
665 static void cse_check_loop_start PROTO((rtx, rtx));
666 static void cse_set_around_loop PROTO((rtx, rtx, rtx));
667 static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
668 static void count_reg_usage PROTO((rtx, int *, rtx, int));
669
670 extern int rtx_equal_function_value_matters;
671 \f
672 /* Return an estimate of the cost of computing rtx X.
673 One use is in cse, to decide which expression to keep in the hash table.
674 Another is in rtl generation, to pick the cheapest way to multiply.
675 Other uses like the latter are expected in the future. */
676
677 /* Internal function, to compute cost when X is not a register; called
678 from COST macro to keep it simple. */
679
680 static int
681 notreg_cost (x)
682 rtx x;
683 {
684 return ((GET_CODE (x) == SUBREG
685 && GET_CODE (SUBREG_REG (x)) == REG
686 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
687 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
688 && (GET_MODE_SIZE (GET_MODE (x))
689 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
690 && subreg_lowpart_p (x)
691 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
692 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
693 ? (CHEAP_REG (SUBREG_REG (x)) ? 0
694 : (REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER ? 1
695 : 2))
696 : rtx_cost (x, SET) * 2);
697 }
698
699 /* Return the right cost to give to an operation
700 to make the cost of the corresponding register-to-register instruction
701 N times that of a fast register-to-register instruction. */
702
703 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
704
705 int
706 rtx_cost (x, outer_code)
707 rtx x;
708 enum rtx_code outer_code;
709 {
710 register int i, j;
711 register enum rtx_code code;
712 register char *fmt;
713 register int total;
714
715 if (x == 0)
716 return 0;
717
718 /* Compute the default costs of certain things.
719 Note that RTX_COSTS can override the defaults. */
720
721 code = GET_CODE (x);
722 switch (code)
723 {
724 case MULT:
725 /* Count multiplication by 2**n as a shift,
726 because if we are considering it, we would output it as a shift. */
727 if (GET_CODE (XEXP (x, 1)) == CONST_INT
728 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
729 total = 2;
730 else
731 total = COSTS_N_INSNS (5);
732 break;
733 case DIV:
734 case UDIV:
735 case MOD:
736 case UMOD:
737 total = COSTS_N_INSNS (7);
738 break;
739 case USE:
740 /* Used in loop.c and combine.c as a marker. */
741 total = 0;
742 break;
743 case ASM_OPERANDS:
744 /* We don't want these to be used in substitutions because
745 we have no way of validating the resulting insn. So assign
746 anything containing an ASM_OPERANDS a very high cost. */
747 total = 1000;
748 break;
749 default:
750 total = 2;
751 }
752
753 switch (code)
754 {
755 case REG:
756 return ! CHEAP_REG (x);
757
758 case SUBREG:
759 /* If we can't tie these modes, make this expensive. The larger
760 the mode, the more expensive it is. */
761 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
762 return COSTS_N_INSNS (2
763 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
764 return 2;
765 #ifdef RTX_COSTS
766 RTX_COSTS (x, code, outer_code);
767 #endif
768 CONST_COSTS (x, code, outer_code);
769 }
770
771 /* Sum the costs of the sub-rtx's, plus cost of this operation,
772 which is already in total. */
773
774 fmt = GET_RTX_FORMAT (code);
775 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
776 if (fmt[i] == 'e')
777 total += rtx_cost (XEXP (x, i), code);
778 else if (fmt[i] == 'E')
779 for (j = 0; j < XVECLEN (x, i); j++)
780 total += rtx_cost (XVECEXP (x, i, j), code);
781
782 return total;
783 }
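
/* Worked example (an illustration; assumes no RTX_COSTS or CONST_COSTS
   case applies to the operands): for (plus:SI (reg 100) (reg 101)) with
   two ordinary pseudos, the default gives total = 2 and each REG operand
   adds 1, so rtx_cost returns 4; COST on the same rtx doubles this to 8
   through notreg_cost. */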
784 \f
785 /* Clear the hash table and initialize each register with its own quantity,
786 for a new basic block. */
787
788 static void
789 new_basic_block ()
790 {
791 register int i;
792
793 next_qty = max_reg;
794
795 bzero ((char *) reg_tick, max_reg * sizeof (int));
796
797 bcopy ((char *) all_minus_one, (char *) reg_in_table,
798 max_reg * sizeof (int));
799 bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
800 CLEAR_HARD_REG_SET (hard_regs_in_table);
801
802 /* The per-quantity values used to be initialized here, but it is
803 much faster to initialize each as it is made in `make_new_qty'. */
804
805 for (i = 0; i < NBUCKETS; i++)
806 {
807 register struct table_elt *this, *next;
808 for (this = table[i]; this; this = next)
809 {
810 next = this->next_same_hash;
811 free_element (this);
812 }
813 }
814
815 bzero ((char *) table, sizeof table);
816
817 prev_insn = 0;
818
819 #ifdef HAVE_cc0
820 prev_insn_cc0 = 0;
821 #endif
822 }
823
824 /* Say that register REG contains a quantity not held in any register
825 before, and initialize that quantity. */
826
827 static void
828 make_new_qty (reg)
829 register int reg;
830 {
831 register int q;
832
833 if (next_qty >= max_qty)
834 abort ();
835
836 q = reg_qty[reg] = next_qty++;
837 qty_first_reg[q] = reg;
838 qty_last_reg[q] = reg;
839 qty_const[q] = qty_const_insn[q] = 0;
840 qty_comparison_code[q] = UNKNOWN;
841
842 reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
843 }
844
845 /* Make reg NEW equivalent to reg OLD.
846 OLD is not changing; NEW is. */
847
848 static void
849 make_regs_eqv (new, old)
850 register int new, old;
851 {
852 register int lastr, firstr;
853 register int q = reg_qty[old];
854
855 /* Nothing should become eqv until it has a "non-invalid" qty number. */
856 if (! REGNO_QTY_VALID_P (old))
857 abort ();
858
859 reg_qty[new] = q;
860 firstr = qty_first_reg[q];
861 lastr = qty_last_reg[q];
862
863 /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
864 hard regs. Among pseudos, if NEW will live longer than any other reg
865 of the same qty, and that is beyond the current basic block,
866 make it the new canonical replacement for this qty. */
867 if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
868 /* Certain fixed registers might be of the class NO_REGS. This means
869 that not only can they not be allocated by the compiler, but
870 they cannot be used in substitutions or canonicalizations
871 either. */
872 && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
873 && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
874 || (new >= FIRST_PSEUDO_REGISTER
875 && (firstr < FIRST_PSEUDO_REGISTER
876 || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
877 || (uid_cuid[REGNO_FIRST_UID (new)]
878 < cse_basic_block_start))
879 && (uid_cuid[REGNO_LAST_UID (new)]
880 > uid_cuid[REGNO_LAST_UID (firstr)]))))))
881 {
882 reg_prev_eqv[firstr] = new;
883 reg_next_eqv[new] = firstr;
884 reg_prev_eqv[new] = -1;
885 qty_first_reg[q] = new;
886 }
887 else
888 {
889 /* If NEW is a hard reg (known to be non-fixed), insert at end.
890 Otherwise, insert before any non-fixed hard regs that are at the
891 end. Registers of class NO_REGS cannot be used as an
892 equivalent for anything. */
893 while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
894 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
895 && new >= FIRST_PSEUDO_REGISTER)
896 lastr = reg_prev_eqv[lastr];
897 reg_next_eqv[new] = reg_next_eqv[lastr];
898 if (reg_next_eqv[lastr] >= 0)
899 reg_prev_eqv[reg_next_eqv[lastr]] = new;
900 else
901 qty_last_reg[q] = new;
902 reg_next_eqv[lastr] = new;
903 reg_prev_eqv[new] = lastr;
904 }
905 }
906
907 /* Remove REG from its equivalence class. */
908
909 static void
910 delete_reg_equiv (reg)
911 register int reg;
912 {
913 register int q = reg_qty[reg];
914 register int p, n;
915
916 /* If invalid, do nothing. */
917 if (q == reg)
918 return;
919
920 p = reg_prev_eqv[reg];
921 n = reg_next_eqv[reg];
922
923 if (n != -1)
924 reg_prev_eqv[n] = p;
925 else
926 qty_last_reg[q] = p;
927 if (p != -1)
928 reg_next_eqv[p] = n;
929 else
930 qty_first_reg[q] = n;
931
932 reg_qty[reg] = reg;
933 }
934
935 /* Remove any invalid expressions from the hash table
936 that refer to any of the registers contained in expression X.
937
938 Make sure that newly inserted references to those registers
939 as subexpressions will be considered valid.
940
941 mention_regs is not called when a register itself
942 is being stored in the table.
943
944 Return 1 if we have done something that may have changed the hash code
945 of X. */
946
947 static int
948 mention_regs (x)
949 rtx x;
950 {
951 register enum rtx_code code;
952 register int i, j;
953 register char *fmt;
954 register int changed = 0;
955
956 if (x == 0)
957 return 0;
958
959 code = GET_CODE (x);
960 if (code == REG)
961 {
962 register int regno = REGNO (x);
963 register int endregno
964 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
965 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
966 int i;
967
968 for (i = regno; i < endregno; i++)
969 {
970 if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
971 remove_invalid_refs (i);
972
973 reg_in_table[i] = reg_tick[i];
974 }
975
976 return 0;
977 }
978
979 /* If X is a comparison or a COMPARE and either operand is a register
980 that does not have a quantity, give it one. This is so that a later
981 call to record_jump_equiv won't cause X to be assigned a different
982 hash code and not found in the table after that call.
983
984 It is not necessary to do this here, since rehash_using_reg can
985 fix up the table later, but doing this here eliminates the need to
986 call that expensive function in the most common case where the only
987 use of the register is in the comparison. */
988
989 if (code == COMPARE || GET_RTX_CLASS (code) == '<')
990 {
991 if (GET_CODE (XEXP (x, 0)) == REG
992 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
993 if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
994 {
995 rehash_using_reg (XEXP (x, 0));
996 changed = 1;
997 }
998
999 if (GET_CODE (XEXP (x, 1)) == REG
1000 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
1001 if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
1002 {
1003 rehash_using_reg (XEXP (x, 1));
1004 changed = 1;
1005 }
1006 }
1007
1008 fmt = GET_RTX_FORMAT (code);
1009 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1010 if (fmt[i] == 'e')
1011 changed |= mention_regs (XEXP (x, i));
1012 else if (fmt[i] == 'E')
1013 for (j = 0; j < XVECLEN (x, i); j++)
1014 changed |= mention_regs (XVECEXP (x, i, j));
1015
1016 return changed;
1017 }
1018
1019 /* Update the register quantities for inserting X into the hash table
1020 with a value equivalent to CLASSP.
1021 (If the class does not contain a REG, it is irrelevant.)
1022 If MODIFIED is nonzero, X is a destination; it is being modified.
1023 Note that delete_reg_equiv should be called on a register
1024 before insert_regs is done on that register with MODIFIED != 0.
1025
1026 Nonzero value means that elements of reg_qty have changed
1027 so X's hash code may be different. */
1028
1029 static int
1030 insert_regs (x, classp, modified)
1031 rtx x;
1032 struct table_elt *classp;
1033 int modified;
1034 {
1035 if (GET_CODE (x) == REG)
1036 {
1037 register int regno = REGNO (x);
1038
1039 /* If REGNO is in the equivalence table already but is of the
1040 wrong mode for that equivalence, don't do anything here. */
1041
1042 if (REGNO_QTY_VALID_P (regno)
1043 && qty_mode[reg_qty[regno]] != GET_MODE (x))
1044 return 0;
1045
1046 if (modified || ! REGNO_QTY_VALID_P (regno))
1047 {
1048 if (classp)
1049 for (classp = classp->first_same_value;
1050 classp != 0;
1051 classp = classp->next_same_value)
1052 if (GET_CODE (classp->exp) == REG
1053 && GET_MODE (classp->exp) == GET_MODE (x))
1054 {
1055 make_regs_eqv (regno, REGNO (classp->exp));
1056 return 1;
1057 }
1058
1059 make_new_qty (regno);
1060 qty_mode[reg_qty[regno]] = GET_MODE (x);
1061 return 1;
1062 }
1063
1064 return 0;
1065 }
1066
1067 /* If X is a SUBREG, we will likely be inserting the inner register in the
1068 table. If that register doesn't have an assigned quantity number at
1069 this point but does later, the insertion that we will be doing now will
1070 not be accessible because its hash code will have changed. So assign
1071 a quantity number now. */
1072
1073 else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1074 && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
1075 {
1076 insert_regs (SUBREG_REG (x), NULL_PTR, 0);
1077 mention_regs (SUBREG_REG (x));
1078 return 1;
1079 }
1080 else
1081 return mention_regs (x);
1082 }
1083 \f
1084 /* Look in or update the hash table. */
1085
1086 /* Put the element ELT on the list of free elements. */
1087
1088 static void
1089 free_element (elt)
1090 struct table_elt *elt;
1091 {
1092 elt->next_same_hash = free_element_chain;
1093 free_element_chain = elt;
1094 }
1095
1096 /* Return an element that is free for use. */
1097
1098 static struct table_elt *
1099 get_element ()
1100 {
1101 struct table_elt *elt = free_element_chain;
1102 if (elt)
1103 {
1104 free_element_chain = elt->next_same_hash;
1105 return elt;
1106 }
1107 n_elements_made++;
1108 return (struct table_elt *) oballoc (sizeof (struct table_elt));
1109 }
1110
1111 /* Remove table element ELT from use in the table.
1112 HASH is its hash code, made using the HASH macro.
1113 It's an argument because often that is known in advance
1114 and we save much time not recomputing it. */
1115
1116 static void
1117 remove_from_table (elt, hash)
1118 register struct table_elt *elt;
1119 unsigned hash;
1120 {
1121 if (elt == 0)
1122 return;
1123
1124 /* Mark this element as removed. See cse_insn. */
1125 elt->first_same_value = 0;
1126
1127 /* Remove the table element from its equivalence class. */
1128
1129 {
1130 register struct table_elt *prev = elt->prev_same_value;
1131 register struct table_elt *next = elt->next_same_value;
1132
1133 if (next) next->prev_same_value = prev;
1134
1135 if (prev)
1136 prev->next_same_value = next;
1137 else
1138 {
1139 register struct table_elt *newfirst = next;
1140 while (next)
1141 {
1142 next->first_same_value = newfirst;
1143 next = next->next_same_value;
1144 }
1145 }
1146 }
1147
1148 /* Remove the table element from its hash bucket. */
1149
1150 {
1151 register struct table_elt *prev = elt->prev_same_hash;
1152 register struct table_elt *next = elt->next_same_hash;
1153
1154 if (next) next->prev_same_hash = prev;
1155
1156 if (prev)
1157 prev->next_same_hash = next;
1158 else if (table[hash] == elt)
1159 table[hash] = next;
1160 else
1161 {
1162 /* This entry is not in the proper hash bucket. This can happen
1163 when two classes were merged by `merge_equiv_classes'. Search
1164 for the hash bucket that it heads. This happens only very
1165 rarely, so the cost is acceptable. */
1166 for (hash = 0; hash < NBUCKETS; hash++)
1167 if (table[hash] == elt)
1168 table[hash] = next;
1169 }
1170 }
1171
1172 /* Remove the table element from its related-value circular chain. */
1173
1174 if (elt->related_value != 0 && elt->related_value != elt)
1175 {
1176 register struct table_elt *p = elt->related_value;
1177 while (p->related_value != elt)
1178 p = p->related_value;
1179 p->related_value = elt->related_value;
1180 if (p->related_value == p)
1181 p->related_value = 0;
1182 }
1183
1184 free_element (elt);
1185 }
1186
1187 /* Look up X in the hash table and return its table element,
1188 or 0 if X is not in the table.
1189
1190 MODE is the machine-mode of X, or if X is an integer constant
1191 with VOIDmode then MODE is the mode with which X will be used.
1192
1193 Here we are satisfied to find an expression whose tree structure
1194 looks like X. */
1195
1196 static struct table_elt *
1197 lookup (x, hash, mode)
1198 rtx x;
1199 unsigned hash;
1200 enum machine_mode mode;
1201 {
1202 register struct table_elt *p;
1203
1204 for (p = table[hash]; p; p = p->next_same_hash)
1205 if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
1206 || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
1207 return p;
1208
1209 return 0;
1210 }
1211
1212 /* Like `lookup' but don't care whether the table element uses invalid regs.
1213 Also ignore discrepancies in the machine mode of a register. */
1214
1215 static struct table_elt *
1216 lookup_for_remove (x, hash, mode)
1217 rtx x;
1218 unsigned hash;
1219 enum machine_mode mode;
1220 {
1221 register struct table_elt *p;
1222
1223 if (GET_CODE (x) == REG)
1224 {
1225 int regno = REGNO (x);
1226 /* Don't check the machine mode when comparing registers;
1227 invalidating (REG:SI 0) also invalidates (REG:DF 0). */
1228 for (p = table[hash]; p; p = p->next_same_hash)
1229 if (GET_CODE (p->exp) == REG
1230 && REGNO (p->exp) == regno)
1231 return p;
1232 }
1233 else
1234 {
1235 for (p = table[hash]; p; p = p->next_same_hash)
1236 if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
1237 return p;
1238 }
1239
1240 return 0;
1241 }
1242
1243 /* Look for an expression equivalent to X and with code CODE.
1244 If one is found, return that expression. */
1245
1246 static rtx
1247 lookup_as_function (x, code)
1248 rtx x;
1249 enum rtx_code code;
1250 {
1251 register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
1252 GET_MODE (x));
1253 if (p == 0)
1254 return 0;
1255
1256 for (p = p->first_same_value; p; p = p->next_same_value)
1257 {
1258 if (GET_CODE (p->exp) == code
1259 /* Make sure this is a valid entry in the table. */
1260 && exp_equiv_p (p->exp, p->exp, 1, 0))
1261 return p->exp;
1262 }
1263
1264 return 0;
1265 }
1266
1267 /* Insert X in the hash table, assuming HASH is its hash code
1268 and CLASSP is an element of the class it should go in
1269 (or 0 if a new class should be made).
1270 It is inserted at the proper position to keep the class in
1271 the order cheapest first.
1272
1273 MODE is the machine-mode of X, or if X is an integer constant
1274 with VOIDmode then MODE is the mode with which X will be used.
1275
1276 For elements of equal cheapness, the most recent one
1277 goes in front, except that the first element in the list
1278 remains first unless a cheaper element is added. The order of
1279 pseudo-registers does not matter, as canon_reg will be called to
1280 find the cheapest when a register is retrieved from the table.
1281
1282 The in_memory field in the hash table element is set to 0.
1283 The caller must set it nonzero if appropriate.
1284
1285 You should call insert_regs (X, CLASSP, MODIFY) before calling here,
1286 and if insert_regs returns a nonzero value
1287 you must then recompute its hash code before calling here.
1288
1289 If necessary, update table showing constant values of quantities. */
1290
1291 #define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
1292
1293 static struct table_elt *
1294 insert (x, classp, hash, mode)
1295 register rtx x;
1296 register struct table_elt *classp;
1297 unsigned hash;
1298 enum machine_mode mode;
1299 {
1300 register struct table_elt *elt;
1301
1302 /* If X is a register and we haven't made a quantity for it,
1303 something is wrong. */
1304 if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
1305 abort ();
1306
1307 /* If X is a hard register, show it is being put in the table. */
1308 if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
1309 {
1310 int regno = REGNO (x);
1311 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1312 int i;
1313
1314 for (i = regno; i < endregno; i++)
1315 SET_HARD_REG_BIT (hard_regs_in_table, i);
1316 }
1317
1318 /* If X is a label, show we recorded it. */
1319 if (GET_CODE (x) == LABEL_REF
1320 || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
1321 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
1322 recorded_label_ref = 1;
1323
1324 /* Put an element for X into the right hash bucket. */
1325
1326 elt = get_element ();
1327 elt->exp = x;
1328 elt->cost = COST (x);
1329 elt->next_same_value = 0;
1330 elt->prev_same_value = 0;
1331 elt->next_same_hash = table[hash];
1332 elt->prev_same_hash = 0;
1333 elt->related_value = 0;
1334 elt->in_memory = 0;
1335 elt->mode = mode;
1336 elt->is_const = (CONSTANT_P (x)
1337 /* GNU C++ takes advantage of this for `this'
1338 (and other const values). */
1339 || (RTX_UNCHANGING_P (x)
1340 && GET_CODE (x) == REG
1341 && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1342 || FIXED_BASE_PLUS_P (x));
1343
1344 if (table[hash])
1345 table[hash]->prev_same_hash = elt;
1346 table[hash] = elt;
1347
1348 /* Put it into the proper value-class. */
1349 if (classp)
1350 {
1351 classp = classp->first_same_value;
1352 if (CHEAPER (elt, classp))
1353 /* Insert at the head of the class */
1354 {
1355 register struct table_elt *p;
1356 elt->next_same_value = classp;
1357 classp->prev_same_value = elt;
1358 elt->first_same_value = elt;
1359
1360 for (p = classp; p; p = p->next_same_value)
1361 p->first_same_value = elt;
1362 }
1363 else
1364 {
1365 /* Insert not at head of the class. */
1366 /* Put it after the last element cheaper than X. */
1367 register struct table_elt *p, *next;
1368 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
1369 p = next);
1370 /* Put it after P and before NEXT. */
1371 elt->next_same_value = next;
1372 if (next)
1373 next->prev_same_value = elt;
1374 elt->prev_same_value = p;
1375 p->next_same_value = elt;
1376 elt->first_same_value = classp;
1377 }
1378 }
1379 else
1380 elt->first_same_value = elt;
1381
1382 /* If this is a constant being set equivalent to a register or a register
1383 being set equivalent to a constant, note the constant equivalence.
1384
1385 If this is a constant, it cannot be equivalent to a different constant,
1386 and a constant is the only thing that can be cheaper than a register. So
1387 we know the register is the head of the class (before the constant was
1388 inserted).
1389
1390 If this is a register that is not already known equivalent to a
1391 constant, we must check the entire class.
1392
1393 If this is a register that is already known equivalent to a constant,
1394 update `qty_const_insn' to show that `this_insn' is the latest
1395 insn making that quantity equivalent to the constant. */
1396
1397 if (elt->is_const && classp && GET_CODE (classp->exp) == REG
1398 && GET_CODE (x) != REG)
1399 {
1400 qty_const[reg_qty[REGNO (classp->exp)]]
1401 = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
1402 qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
1403 }
1404
1405 else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
1406 && ! elt->is_const)
1407 {
1408 register struct table_elt *p;
1409
1410 for (p = classp; p != 0; p = p->next_same_value)
1411 {
1412 if (p->is_const && GET_CODE (p->exp) != REG)
1413 {
1414 qty_const[reg_qty[REGNO (x)]]
1415 = gen_lowpart_if_possible (GET_MODE (x), p->exp);
1416 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1417 break;
1418 }
1419 }
1420 }
1421
1422 else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
1423 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
1424 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1425
1426 /* If this is a constant with symbolic value,
1427 and it has a term with an explicit integer value,
1428 link it up with related expressions. */
1429 if (GET_CODE (x) == CONST)
1430 {
1431 rtx subexp = get_related_value (x);
1432 unsigned subhash;
1433 struct table_elt *subelt, *subelt_prev;
1434
1435 if (subexp != 0)
1436 {
1437 /* Get the integer-free subexpression in the hash table. */
1438 subhash = safe_hash (subexp, mode) % NBUCKETS;
1439 subelt = lookup (subexp, subhash, mode);
1440 if (subelt == 0)
1441 subelt = insert (subexp, NULL_PTR, subhash, mode);
1442 /* Initialize SUBELT's circular chain if it has none. */
1443 if (subelt->related_value == 0)
1444 subelt->related_value = subelt;
1445 /* Find the element in the circular chain that precedes SUBELT. */
1446 subelt_prev = subelt;
1447 while (subelt_prev->related_value != subelt)
1448 subelt_prev = subelt_prev->related_value;
1449 /* Put new ELT into SUBELT's circular chain just before SUBELT.
1450 This way the element that follows SUBELT is the oldest one. */
1451 elt->related_value = subelt_prev->related_value;
1452 subelt_prev->related_value = elt;
1453 }
1454 }
1455
1456 return elt;
1457 }
1458 \f
1459 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
1460 CLASS2 into CLASS1. This is done when we have reached an insn which makes
1461 the two classes equivalent.
1462
1463 CLASS1 will be the surviving class; CLASS2 should not be used after this
1464 call.
1465
1466 Any invalid entries in CLASS2 will not be copied. */
1467
1468 static void
1469 merge_equiv_classes (class1, class2)
1470 struct table_elt *class1, *class2;
1471 {
1472 struct table_elt *elt, *next, *new;
1473
1474 /* Ensure we start with the head of the classes. */
1475 class1 = class1->first_same_value;
1476 class2 = class2->first_same_value;
1477
1478 /* If they were already equal, forget it. */
1479 if (class1 == class2)
1480 return;
1481
1482 for (elt = class2; elt; elt = next)
1483 {
1484 unsigned hash;
1485 rtx exp = elt->exp;
1486 enum machine_mode mode = elt->mode;
1487
1488 next = elt->next_same_value;
1489
1490 /* Remove old entry, make a new one in CLASS1's class.
1491 Don't do this for invalid entries as we cannot find their
1492 hash code (it also isn't necessary). */
1493 if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
1494 {
1495 hash_arg_in_memory = 0;
1496 hash_arg_in_struct = 0;
1497 hash = HASH (exp, mode);
1498
1499 if (GET_CODE (exp) == REG)
1500 delete_reg_equiv (REGNO (exp));
1501
1502 remove_from_table (elt, hash);
1503
1504 if (insert_regs (exp, class1, 0))
1505 {
1506 rehash_using_reg (exp);
1507 hash = HASH (exp, mode);
1508 }
1509 new = insert (exp, class1, hash, mode);
1510 new->in_memory = hash_arg_in_memory;
1511 new->in_struct = hash_arg_in_struct;
1512 }
1513 }
1514 }
1515 \f
1516 /* Remove from the hash table, or mark as invalid,
1517 all expressions whose values could be altered by storing in X.
1518 X is a register, a subreg, or a memory reference with nonvarying address
1519 (because, when a memory reference with a varying address is stored in,
1520 all memory references are removed by invalidate_memory
1521 so specific invalidation is superfluous).
1522 FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
1523 instead of just the amount indicated by the mode of X. This is only used
1524 for bitfield stores into memory.
1525
1526 A nonvarying address may be just a register or just
1527 a symbol reference, or it may be either of those plus
1528 a numeric offset. */
1529
1530 static void
1531 invalidate (x, full_mode)
1532 rtx x;
1533 enum machine_mode full_mode;
1534 {
1535 register int i;
1536 register struct table_elt *p;
1537 rtx base;
1538 HOST_WIDE_INT start, end;
1539
1540 /* If X is a register, dependencies on its contents
1541 are recorded through the qty number mechanism.
1542 Just change the qty number of the register,
1543 mark it as invalid for expressions that refer to it,
1544 and remove it itself. */
1545
1546 if (GET_CODE (x) == REG)
1547 {
1548 register int regno = REGNO (x);
1549 register unsigned hash = HASH (x, GET_MODE (x));
1550
1551 /* Remove REGNO from any quantity list it might be on and indicate
1552 that its value might have changed. If it is a pseudo, remove its
1553 entry from the hash table.
1554
1555 For a hard register, we do the first two actions above for any
1556 additional hard registers corresponding to X. Then, if any of these
1557 registers are in the table, we must remove any REG entries that
1558 overlap these registers. */
1559
1560 delete_reg_equiv (regno);
1561 reg_tick[regno]++;
1562
1563 if (regno >= FIRST_PSEUDO_REGISTER)
1564 {
1565 /* Because a register can be referenced in more than one mode,
1566 we might have to remove more than one table entry. */
1567
1568 struct table_elt *elt;
1569
1570 while (elt = lookup_for_remove (x, hash, GET_MODE (x)))
1571 remove_from_table (elt, hash);
1572 }
1573 else
1574 {
1575 HOST_WIDE_INT in_table
1576 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1577 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1578 int tregno, tendregno;
1579 register struct table_elt *p, *next;
1580
1581 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1582
1583 for (i = regno + 1; i < endregno; i++)
1584 {
1585 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1586 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1587 delete_reg_equiv (i);
1588 reg_tick[i]++;
1589 }
1590
1591 if (in_table)
1592 for (hash = 0; hash < NBUCKETS; hash++)
1593 for (p = table[hash]; p; p = next)
1594 {
1595 next = p->next_same_hash;
1596
1597 if (GET_CODE (p->exp) != REG
1598 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1599 continue;
1600
1601 tregno = REGNO (p->exp);
1602 tendregno
1603 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
1604 if (tendregno > regno && tregno < endregno)
1605 remove_from_table (p, hash);
1606 }
1607 }
1608
1609 return;
1610 }
1611
1612 if (GET_CODE (x) == SUBREG)
1613 {
1614 if (GET_CODE (SUBREG_REG (x)) != REG)
1615 abort ();
1616 invalidate (SUBREG_REG (x), VOIDmode);
1617 return;
1618 }
1619
1620 /* X is not a register; it must be a memory reference with
1621 a nonvarying address. Remove all hash table elements
1622 that refer to overlapping pieces of memory. */
1623
1624 if (GET_CODE (x) != MEM)
1625 abort ();
1626
1627 if (full_mode == VOIDmode)
1628 full_mode = GET_MODE (x);
1629
1630 set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
1631 &base, &start, &end);
1632
1633 for (i = 0; i < NBUCKETS; i++)
1634 {
1635 register struct table_elt *next;
1636 for (p = table[i]; p; p = next)
1637 {
1638 next = p->next_same_hash;
1639 if (refers_to_mem_p (p->exp, base, start, end))
1640 remove_from_table (p, i);
1641 }
1642 }
1643 }
1644
1645 /* Remove all expressions that refer to register REGNO,
1646 since they are already invalid, and we are about to
1647 mark that register valid again and don't want the old
1648 expressions to reappear as valid. */
1649
1650 static void
1651 remove_invalid_refs (regno)
1652 int regno;
1653 {
1654 register int i;
1655 register struct table_elt *p, *next;
1656
1657 for (i = 0; i < NBUCKETS; i++)
1658 for (p = table[i]; p; p = next)
1659 {
1660 next = p->next_same_hash;
1661 if (GET_CODE (p->exp) != REG
1662 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1663 remove_from_table (p, i);
1664 }
1665 }
1666 \f
1667 /* Recompute the hash codes of any valid entries in the hash table that
1668 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1669
1670 This is called when we make a jump equivalence. */
1671
1672 static void
1673 rehash_using_reg (x)
1674 rtx x;
1675 {
1676 int i;
1677 struct table_elt *p, *next;
1678 unsigned hash;
1679
1680 if (GET_CODE (x) == SUBREG)
1681 x = SUBREG_REG (x);
1682
1683 /* If X is not a register or if the register is known not to be in any
1684 valid entries in the table, we have no work to do. */
1685
1686 if (GET_CODE (x) != REG
1687 || reg_in_table[REGNO (x)] < 0
1688 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1689 return;
1690
1691 /* Scan all hash chains looking for valid entries that mention X.
1692 If we find one and it is in the wrong hash chain, move it. We can skip
1693 objects that are registers, since they are handled specially. */
1694
1695 for (i = 0; i < NBUCKETS; i++)
1696 for (p = table[i]; p; p = next)
1697 {
1698 next = p->next_same_hash;
1699 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1700 && exp_equiv_p (p->exp, p->exp, 1, 0)
1701 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1702 {
1703 if (p->next_same_hash)
1704 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1705
1706 if (p->prev_same_hash)
1707 p->prev_same_hash->next_same_hash = p->next_same_hash;
1708 else
1709 table[i] = p->next_same_hash;
1710
1711 p->next_same_hash = table[hash];
1712 p->prev_same_hash = 0;
1713 if (table[hash])
1714 table[hash]->prev_same_hash = p;
1715 table[hash] = p;
1716 }
1717 }
1718 }
1719 \f
1720 /* Remove from the hash table all expressions that reference memory,
1721 or some of them as specified by *WRITES. */
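
/* Roughly: WRITES->all being set flushes every MEM entry;
   WRITES->nonscalar additionally flushes entries marked in_struct; and
   entries whose address may vary are flushed on any call.  */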
1722
1723 static void
1724 invalidate_memory (writes)
1725 struct write_data *writes;
1726 {
1727 register int i;
1728 register struct table_elt *p, *next;
1729 int all = writes->all;
1730 int nonscalar = writes->nonscalar;
1731
1732 for (i = 0; i < NBUCKETS; i++)
1733 for (p = table[i]; p; p = next)
1734 {
1735 next = p->next_same_hash;
1736 if (p->in_memory
1737 && (all
1738 || (nonscalar && p->in_struct)
1739 || cse_rtx_addr_varies_p (p->exp)))
1740 remove_from_table (p, i);
1741 }
1742 }
1743 \f
1744 /* Remove from the hash table any expression that is a call-clobbered
1745 register. Also update the reg_tick value of each such register. */
1746
1747 static void
1748 invalidate_for_call ()
1749 {
1750 int regno, endregno;
1751 int i;
1752 unsigned hash;
1753 struct table_elt *p, *next;
1754 int in_table = 0;
1755
1756 /* Go through all the hard registers. For each that is clobbered in
1757 a CALL_INSN, remove the register from quantity chains and update
1758 reg_tick if defined. Also see if any of these registers is currently
1759 in the table. */
1760
1761 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1762 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1763 {
1764 delete_reg_equiv (regno);
1765 if (reg_tick[regno] >= 0)
1766 reg_tick[regno]++;
1767
1768 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1769 }
1770
1771 /* In the case where we have no call-clobbered hard registers in the
1772 table, we are done. Otherwise, scan the table and remove any
1773 entry that overlaps a call-clobbered register. */
1774
1775 if (in_table)
1776 for (hash = 0; hash < NBUCKETS; hash++)
1777 for (p = table[hash]; p; p = next)
1778 {
1779 next = p->next_same_hash;
1780
1781 if (GET_CODE (p->exp) != REG
1782 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1783 continue;
1784
1785 regno = REGNO (p->exp);
1786 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1787
1788 for (i = regno; i < endregno; i++)
1789 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1790 {
1791 remove_from_table (p, hash);
1792 break;
1793 }
1794 }
1795 }
1796 \f
1797 /* Given an expression X of type CONST,
1798 and ELT which is its table entry (or 0 if it
1799 is not in the hash table),
1800 return an alternate expression for X as a register plus integer.
1801 If none can be found, return 0. */
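
/* For example (names and numbers hypothetical): if X is
   (const (plus (symbol_ref "table") (const_int 12))) and the hash table
   already knows that (reg:SI 100) holds
   (const (plus (symbol_ref "table") (const_int 4))), the result is

	(plus:SI (reg:SI 100) (const_int 8))

   since the integer terms of the two constants differ by 8.  */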
1802
1803 static rtx
1804 use_related_value (x, elt)
1805 rtx x;
1806 struct table_elt *elt;
1807 {
1808 register struct table_elt *relt = 0;
1809 register struct table_elt *p, *q;
1810 HOST_WIDE_INT offset;
1811
1812 /* First, is there anything related known?
1813 If we have a table element, we can tell from that.
1814 Otherwise, must look it up. */
1815
1816 if (elt != 0 && elt->related_value != 0)
1817 relt = elt;
1818 else if (elt == 0 && GET_CODE (x) == CONST)
1819 {
1820 rtx subexp = get_related_value (x);
1821 if (subexp != 0)
1822 relt = lookup (subexp,
1823 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1824 GET_MODE (subexp));
1825 }
1826
1827 if (relt == 0)
1828 return 0;
1829
1830 /* Search all related table entries for one that has an
1831 equivalent register. */
1832
1833 p = relt;
1834 while (1)
1835 {
1836 /* This loop is strange in that it is executed in two different cases.
1837 The first is when X is already in the table. Then it is searching
1838 the RELATED_VALUE list of X's class (RELT). The second case is when
1839 X is not in the table. Then RELT points to a class for the related
1840 value.
1841
1842 Ensure that, whatever case we are in, we ignore classes that have
1843 the same value as X. */
1844
1845 if (rtx_equal_p (x, p->exp))
1846 q = 0;
1847 else
1848 for (q = p->first_same_value; q; q = q->next_same_value)
1849 if (GET_CODE (q->exp) == REG)
1850 break;
1851
1852 if (q)
1853 break;
1854
1855 p = p->related_value;
1856
1857 /* We went all the way around, so there is nothing to be found.
1858 Alternatively, perhaps RELT was in the table for some other reason
1859 and it has no related values recorded. */
1860 if (p == relt || p == 0)
1861 break;
1862 }
1863
1864 if (q == 0)
1865 return 0;
1866
1867 offset = (get_integer_term (x) - get_integer_term (p->exp));
1868 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1869 return plus_constant (q->exp, offset);
1870 }
1871 \f
1872 /* Hash an rtx. We are careful to make sure the value is never negative.
1873 Equivalent registers hash identically.
1874 MODE is used in hashing for CONST_INTs only;
1875 otherwise the mode of X is used.
1876
1877 Store 1 in do_not_record if any subexpression is volatile.
1878
1879 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1880 which does not have the RTX_UNCHANGING_P bit set.
1881 In this case, also store 1 in hash_arg_in_struct
1882 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1883
1884 Note that cse_insn knows that the hash code of a MEM expression
1885 is just (int) MEM plus the hash code of the address. */
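
/* As a sketch of the mixing (the breakdown below is illustrative, not
   normative): hashing (plus:SI (reg:SI 100) (const_int 4)) accumulates

	hash = PLUS + SImode			generic rtx case
	     + (REG << 7) + reg_qty[100]	from the REG operand
	     + (CONST_INT << 7) + 4		from the CONST_INT operand
						(inner MODE is 0 here)

   so two registers in the same quantity class hash identically.  */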
1886
1887 static unsigned
1888 canon_hash (x, mode)
1889 rtx x;
1890 enum machine_mode mode;
1891 {
1892 register int i, j;
1893 register unsigned hash = 0;
1894 register enum rtx_code code;
1895 register char *fmt;
1896
1897 /* repeat is used to turn tail-recursion into iteration. */
1898 repeat:
1899 if (x == 0)
1900 return hash;
1901
1902 code = GET_CODE (x);
1903 switch (code)
1904 {
1905 case REG:
1906 {
1907 register int regno = REGNO (x);
1908
1909 /* On some machines, we can't record any non-fixed hard register,
1910 because extending its life will cause reload problems. We
1911 consider ap, fp, and sp to be fixed for this purpose.
1912 On all machines, we can't record any global registers. */
1913
1914 if (regno < FIRST_PSEUDO_REGISTER
1915 && (global_regs[regno]
1916 #ifdef SMALL_REGISTER_CLASSES
1917 || (SMALL_REGISTER_CLASSES
1918 && ! fixed_regs[regno]
1919 && regno != FRAME_POINTER_REGNUM
1920 && regno != HARD_FRAME_POINTER_REGNUM
1921 && regno != ARG_POINTER_REGNUM
1922 && regno != STACK_POINTER_REGNUM)
1923 #endif
1924 ))
1925 {
1926 do_not_record = 1;
1927 return 0;
1928 }
1929 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1930 return hash;
1931 }
1932
1933 case CONST_INT:
1934 {
1935 unsigned HOST_WIDE_INT tem = INTVAL (x);
1936 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1937 return hash;
1938 }
1939
1940 case CONST_DOUBLE:
1941 /* This is like the general case, except that it only counts
1942 the integers representing the constant. */
1943 hash += (unsigned) code + (unsigned) GET_MODE (x);
1944 if (GET_MODE (x) != VOIDmode)
1945 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1946 {
1947 unsigned tem = XINT (x, i);
1948 hash += tem;
1949 }
1950 else
1951 hash += ((unsigned) CONST_DOUBLE_LOW (x)
1952 + (unsigned) CONST_DOUBLE_HIGH (x));
1953 return hash;
1954
1955 /* Assume there is only one rtx object for any given label. */
1956 case LABEL_REF:
1957 hash
1958 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1959 return hash;
1960
1961 case SYMBOL_REF:
1962 hash
1963 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1964 return hash;
1965
1966 case MEM:
1967 if (MEM_VOLATILE_P (x))
1968 {
1969 do_not_record = 1;
1970 return 0;
1971 }
1972 if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
1973 {
1974 hash_arg_in_memory = 1;
1975 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1976 }
1977 /* Now that we have already found this special case,
1978 might as well speed it up as much as possible. */
1979 hash += (unsigned) MEM;
1980 x = XEXP (x, 0);
1981 goto repeat;
1982
1983 case PRE_DEC:
1984 case PRE_INC:
1985 case POST_DEC:
1986 case POST_INC:
1987 case PC:
1988 case CC0:
1989 case CALL:
1990 case UNSPEC_VOLATILE:
1991 do_not_record = 1;
1992 return 0;
1993
1994 case ASM_OPERANDS:
1995 if (MEM_VOLATILE_P (x))
1996 {
1997 do_not_record = 1;
1998 return 0;
1999 }
2000 }
2001
2002 i = GET_RTX_LENGTH (code) - 1;
2003 hash += (unsigned) code + (unsigned) GET_MODE (x);
2004 fmt = GET_RTX_FORMAT (code);
2005 for (; i >= 0; i--)
2006 {
2007 if (fmt[i] == 'e')
2008 {
2009 rtx tem = XEXP (x, i);
2010
2011 /* If we are about to do the last recursive call
2012 needed at this level, change it into iteration.
2013 This function is called enough to be worth it. */
2014 if (i == 0)
2015 {
2016 x = tem;
2017 goto repeat;
2018 }
2019 hash += canon_hash (tem, 0);
2020 }
2021 else if (fmt[i] == 'E')
2022 for (j = 0; j < XVECLEN (x, i); j++)
2023 hash += canon_hash (XVECEXP (x, i, j), 0);
2024 else if (fmt[i] == 's')
2025 {
2026 register unsigned char *p = (unsigned char *) XSTR (x, i);
2027 if (p)
2028 while (*p)
2029 hash += *p++;
2030 }
2031 else if (fmt[i] == 'i')
2032 {
2033 register unsigned tem = XINT (x, i);
2034 hash += tem;
2035 }
2036 else
2037 abort ();
2038 }
2039 return hash;
2040 }
2041
2042 /* Like canon_hash but with no side effects. */
2043
2044 static unsigned
2045 safe_hash (x, mode)
2046 rtx x;
2047 enum machine_mode mode;
2048 {
2049 int save_do_not_record = do_not_record;
2050 int save_hash_arg_in_memory = hash_arg_in_memory;
2051 int save_hash_arg_in_struct = hash_arg_in_struct;
2052 unsigned hash = canon_hash (x, mode);
2053 hash_arg_in_memory = save_hash_arg_in_memory;
2054 hash_arg_in_struct = save_hash_arg_in_struct;
2055 do_not_record = save_do_not_record;
2056 return hash;
2057 }
2058 \f
2059 /* Return 1 iff X and Y would canonicalize into the same thing,
2060 without actually constructing the canonicalization of either one.
2061 If VALIDATE is nonzero,
2062 we assume X is an expression being processed from the rtl
2063 and Y was found in the hash table. We check register refs
2064 in Y for being marked as valid.
2065
2066 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2067 that is known to be in the register. Ordinarily, we don't allow them
2068 to match, because letting them match would cause unpredictable results
2069 in all the places that search a hash table chain for an equivalent
2070 for a given value. A possible equivalent that has different structure
2071 has its hash code computed from different data. Whether the hash code
2072 is the same as that of the given value is pure luck. */
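
/* For instance (register number hypothetical): if (reg:SI 100) is
   currently known to hold (const_int 4), then with EQUAL_VALUES nonzero

	exp_equiv_p ((const_int 4), (reg:SI 100), 1, 1)

   returns 1, provided the register's table entries are still valid.  */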
2073
2074 static int
2075 exp_equiv_p (x, y, validate, equal_values)
2076 rtx x, y;
2077 int validate;
2078 int equal_values;
2079 {
2080 register int i, j;
2081 register enum rtx_code code;
2082 register char *fmt;
2083
2084 /* Note: it is incorrect to assume an expression is equivalent to itself
2085 if VALIDATE is nonzero. */
2086 if (x == y && !validate)
2087 return 1;
2088 if (x == 0 || y == 0)
2089 return x == y;
2090
2091 code = GET_CODE (x);
2092 if (code != GET_CODE (y))
2093 {
2094 if (!equal_values)
2095 return 0;
2096
2097 /* If X is a constant and Y is a register or vice versa, they may be
2098 equivalent. We only have to validate if Y is a register. */
2099 if (CONSTANT_P (x) && GET_CODE (y) == REG
2100 && REGNO_QTY_VALID_P (REGNO (y))
2101 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2102 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2103 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2104 return 1;
2105
2106 if (CONSTANT_P (y) && code == REG
2107 && REGNO_QTY_VALID_P (REGNO (x))
2108 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2109 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2110 return 1;
2111
2112 return 0;
2113 }
2114
2115 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2116 if (GET_MODE (x) != GET_MODE (y))
2117 return 0;
2118
2119 switch (code)
2120 {
2121 case PC:
2122 case CC0:
2123 return x == y;
2124
2125 case CONST_INT:
2126 return INTVAL (x) == INTVAL (y);
2127
2128 case LABEL_REF:
2129 return XEXP (x, 0) == XEXP (y, 0);
2130
2131 case SYMBOL_REF:
2132 return XSTR (x, 0) == XSTR (y, 0);
2133
2134 case REG:
2135 {
2136 int regno = REGNO (y);
2137 int endregno
2138 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2139 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2140 int i;
2141
2142 /* If the quantities are not the same, the expressions are not
2143 equivalent. If they are and we are not to validate, they
2144 are equivalent. Otherwise, ensure all regs are up-to-date. */
2145
2146 if (reg_qty[REGNO (x)] != reg_qty[regno])
2147 return 0;
2148
2149 if (! validate)
2150 return 1;
2151
2152 for (i = regno; i < endregno; i++)
2153 if (reg_in_table[i] != reg_tick[i])
2154 return 0;
2155
2156 return 1;
2157 }
2158
2159 /* For commutative operations, check both orders. */
2160 case PLUS:
2161 case MULT:
2162 case AND:
2163 case IOR:
2164 case XOR:
2165 case NE:
2166 case EQ:
2167 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2168 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2169 validate, equal_values))
2170 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2171 validate, equal_values)
2172 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2173 validate, equal_values)));
2174 }
2175
2176 /* Compare the elements. If any pair of corresponding elements
2177 fails to match, return 0 for the whole thing. */
2178
2179 fmt = GET_RTX_FORMAT (code);
2180 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2181 {
2182 switch (fmt[i])
2183 {
2184 case 'e':
2185 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2186 return 0;
2187 break;
2188
2189 case 'E':
2190 if (XVECLEN (x, i) != XVECLEN (y, i))
2191 return 0;
2192 for (j = 0; j < XVECLEN (x, i); j++)
2193 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2194 validate, equal_values))
2195 return 0;
2196 break;
2197
2198 case 's':
2199 if (strcmp (XSTR (x, i), XSTR (y, i)))
2200 return 0;
2201 break;
2202
2203 case 'i':
2204 if (XINT (x, i) != XINT (y, i))
2205 return 0;
2206 break;
2207
2208 case 'w':
2209 if (XWINT (x, i) != XWINT (y, i))
2210 return 0;
2211 break;
2212
2213 case '0':
2214 break;
2215
2216 default:
2217 abort ();
2218 }
2219 }
2220
2221 return 1;
2222 }
2223 \f
2224 /* Return 1 iff any subexpression of X matches Y.
2225 Here we do not require that X or Y be valid (for registers referred to)
2226 for being in the hash table. */
2227
2228 static int
2229 refers_to_p (x, y)
2230 rtx x, y;
2231 {
2232 register int i;
2233 register enum rtx_code code;
2234 register char *fmt;
2235
2236 repeat:
2237 if (x == y)
2238 return 1;
2239 if (x == 0 || y == 0)
2240 return 0;
2241
2242 code = GET_CODE (x);
2243 /* If X as a whole has the same code as Y, they may match.
2244 If so, return 1. */
2245 if (code == GET_CODE (y))
2246 {
2247 if (exp_equiv_p (x, y, 0, 1))
2248 return 1;
2249 }
2250
2251 /* X does not match, so try its subexpressions. */
2252
2253 fmt = GET_RTX_FORMAT (code);
2254 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2255 if (fmt[i] == 'e')
2256 {
2257 if (i == 0)
2258 {
2259 x = XEXP (x, 0);
2260 goto repeat;
2261 }
2262 else
2263 if (refers_to_p (XEXP (x, i), y))
2264 return 1;
2265 }
2266 else if (fmt[i] == 'E')
2267 {
2268 int j;
2269 for (j = 0; j < XVECLEN (x, i); j++)
2270 if (refers_to_p (XVECEXP (x, i, j), y))
2271 return 1;
2272 }
2273
2274 return 0;
2275 }
2276 \f
2277 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2278 set PBASE, PSTART, and PEND which correspond to the base of the address,
2279 the starting offset, and ending offset respectively.
2280
2281 ADDR is known to be a nonvarying address. */
2282
2283 /* ??? Despite what the comments say, this function is in fact frequently
2284 passed varying addresses. This does not appear to cause any problems. */
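
/* A worked example (offsets hypothetical): if ADDR is
   (plus:SI (reg:SI frame_pointer) (const_int 8)) and SIZE is 4, and the
   register has no known constant value, the results are

	*PBASE  = (reg:SI frame_pointer)
	*PSTART = 8
	*PEND   = 12

   i.e. the reference covers bytes [8, 12) relative to the base.  */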
2285
2286 static void
2287 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2288 rtx addr;
2289 int size;
2290 rtx *pbase;
2291 HOST_WIDE_INT *pstart, *pend;
2292 {
2293 rtx base;
2294 HOST_WIDE_INT start, end;
2295
2296 base = addr;
2297 start = 0;
2298 end = 0;
2299
2300 /* Registers with nonvarying addresses usually have constant equivalents;
2301 but the frame pointer register is also possible. */
2302 if (GET_CODE (base) == REG
2303 && qty_const != 0
2304 && REGNO_QTY_VALID_P (REGNO (base))
2305 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2306 && qty_const[reg_qty[REGNO (base)]] != 0)
2307 base = qty_const[reg_qty[REGNO (base)]];
2308 else if (GET_CODE (base) == PLUS
2309 && GET_CODE (XEXP (base, 1)) == CONST_INT
2310 && GET_CODE (XEXP (base, 0)) == REG
2311 && qty_const != 0
2312 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2313 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2314 == GET_MODE (XEXP (base, 0)))
2315 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2316 {
2317 start = INTVAL (XEXP (base, 1));
2318 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2319 }
2320 /* This can happen as the result of virtual register instantiation,
2321 if the initial offset is too large to be a valid address. */
2322 else if (GET_CODE (base) == PLUS
2323 && GET_CODE (XEXP (base, 0)) == REG
2324 && GET_CODE (XEXP (base, 1)) == REG
2325 && qty_const != 0
2326 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2327 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2328 == GET_MODE (XEXP (base, 0)))
2329 && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
2330 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2331 && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
2332 == GET_MODE (XEXP (base, 1)))
2333 && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
2334 {
2335 rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
2336 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2337
2338 /* One of the two values must be a constant. */
2339 if (GET_CODE (base) != CONST_INT)
2340 {
2341 if (GET_CODE (tem) != CONST_INT)
2342 abort ();
2343 start = INTVAL (tem);
2344 }
2345 else
2346 {
2347 start = INTVAL (base);
2348 base = tem;
2349 }
2350 }
2351
2352 /* Handle everything that we can find inside an address that has been
2353 viewed as constant. */
2354
2355 while (1)
2356 {
2357 /* If no part of this switch does a "continue", the code outside
2358 will exit this loop. */
2359
2360 switch (GET_CODE (base))
2361 {
2362 case LO_SUM:
2363 /* By definition, operand1 of a LO_SUM is the associated constant
2364 address. Use the associated constant address as the base
2365 instead. */
2366 base = XEXP (base, 1);
2367 continue;
2368
2369 case CONST:
2370 /* Strip off CONST. */
2371 base = XEXP (base, 0);
2372 continue;
2373
2374 case PLUS:
2375 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2376 {
2377 start += INTVAL (XEXP (base, 1));
2378 base = XEXP (base, 0);
2379 continue;
2380 }
2381 break;
2382
2383 case AND:
2384 /* Handle the case of an AND which is the negative of a power of
2385 two. This is used to represent unaligned memory operations. */
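/* For instance, (and:SI A (const_int -8)) clears the low three bits
   of A, so the access may begin as many as 7 bytes below A; the code
   below widens the byte range accordingly.  */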
2386 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2387 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2388 {
2389 set_nonvarying_address_components (XEXP (base, 0), size,
2390 pbase, pstart, pend);
2391
2392 /* Assume the worst misalignment. START is affected, but not
2393 END, so compensate by adjusting SIZE. Don't lose any
2394 constant we already had. */
2395
2396 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2397 start += *pstart + INTVAL (XEXP (base, 1)) + 1;
2398 end += *pend;
2399 base = *pbase;
2400 }
2401 break;
2402 }
2403
2404 break;
2405 }
2406
2407 if (GET_CODE (base) == CONST_INT)
2408 {
2409 start += INTVAL (base);
2410 base = const0_rtx;
2411 }
2412
2413 end = start + size;
2414
2415 /* Set the return values. */
2416 *pbase = base;
2417 *pstart = start;
2418 *pend = end;
2419 }
2420
2421 /* Return 1 iff any subexpression of X refers to memory
2422 at an address of BASE plus some offset
2423 such that any of the bytes' offsets fall between START (inclusive)
2424 and END (exclusive).
2425
2426 The value is undefined if X is a varying address (as determined by
2427 cse_rtx_addr_varies_p). This function is not used in such cases.
2428
2429 When used in the cse pass, `qty_const' is nonzero, and it is used
2430 to treat an address that is a register with a known constant value
2431 as if it were that constant value.
2432 In the loop pass, `qty_const' is zero, so this is not done. */
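
/* The overlap test itself is a half-open interval intersection: a MEM
   covering bytes [MYSTART, MYEND) conflicts with [START, END) iff

	myend > start && mystart < end

   so, e.g., [8, 12) and [10, 14) conflict while [8, 12) and [12, 16)
   do not.  */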
2433
2434 static int
2435 refers_to_mem_p (x, base, start, end)
2436 rtx x, base;
2437 HOST_WIDE_INT start, end;
2438 {
2439 register HOST_WIDE_INT i;
2440 register enum rtx_code code;
2441 register char *fmt;
2442
2443 repeat:
2444 if (x == 0)
2445 return 0;
2446
2447 code = GET_CODE (x);
2448 if (code == MEM)
2449 {
2450 register rtx addr = XEXP (x, 0); /* Get the address. */
2451 rtx mybase;
2452 HOST_WIDE_INT mystart, myend;
2453
2454 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2455 &mybase, &mystart, &myend);
2456
2457
2458 /* refers_to_mem_p is never called with varying addresses.
2459 If the base addresses are not equal, there is no chance
2460 of the memory addresses conflicting. */
2461 if (! rtx_equal_p (mybase, base))
2462 return 0;
2463
2464 return myend > start && mystart < end;
2465 }
2466
2467 /* X does not match, so try its subexpressions. */
2468
2469 fmt = GET_RTX_FORMAT (code);
2470 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2471 if (fmt[i] == 'e')
2472 {
2473 if (i == 0)
2474 {
2475 x = XEXP (x, 0);
2476 goto repeat;
2477 }
2478 else
2479 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2480 return 1;
2481 }
2482 else if (fmt[i] == 'E')
2483 {
2484 int j;
2485 for (j = 0; j < XVECLEN (x, i); j++)
2486 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2487 return 1;
2488 }
2489
2490 return 0;
2491 }
2492
2493 /* Nonzero if X refers to memory at a varying address;
2494 except that a register which has at the moment a known constant value
2495 isn't considered variable. */
2496
2497 static int
2498 cse_rtx_addr_varies_p (x)
2499 rtx x;
2500 {
2501 /* We need not check for X and the equivalence class being of the same
2502 mode because if X is equivalent to a constant in some mode, it
2503 doesn't vary in any mode. */
2504
2505 if (GET_CODE (x) == MEM
2506 && GET_CODE (XEXP (x, 0)) == REG
2507 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2508 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2509 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2510 return 0;
2511
2512 if (GET_CODE (x) == MEM
2513 && GET_CODE (XEXP (x, 0)) == PLUS
2514 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2515 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2516 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2517 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2518 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2519 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2520 return 0;
2521
2522 /* This can happen as the result of virtual register instantiation, if
2523 the initial constant is too large to be a valid address. This gives
2524 us a three instruction sequence, load large offset into a register,
2525 load fp minus a constant into a register, then a MEM which is the
2526 sum of the two `constant' registers. */
2527 if (GET_CODE (x) == MEM
2528 && GET_CODE (XEXP (x, 0)) == PLUS
2529 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2530 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
2531 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2532 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2533 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2534 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
2535 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
2536 && (GET_MODE (XEXP (XEXP (x, 0), 1))
2537 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2538 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2539 return 0;
2540
2541 return rtx_addr_varies_p (x);
2542 }
2543 \f
2544 /* Canonicalize an expression:
2545 replace each register reference inside it
2546 with the "oldest" equivalent register.
2547
2548 If INSN is non-zero and we are replacing a pseudo with a hard register
2549 or vice versa, validate_change is used to ensure that INSN remains valid
2550 after we make our substitution. The calls are made with IN_GROUP non-zero
2551 so apply_change_group must be called upon the outermost return from this
2552 function (unless INSN is zero). The result of apply_change_group can
2553 generally be discarded since the changes we are making are optional. */
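
/* For example (register numbers hypothetical): if pseudos 100 and 105
   are in one quantity class whose oldest member is 100, canon_reg
   rewrites (plus:SI (reg:SI 105) (const_int 4)) into
   (plus:SI (reg:SI 100) (const_int 4)), going through validate_change
   only when the replacement swaps a pseudo for a hard register (or vice
   versa) or the insn has MATCH_DUPs.  */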
2554
2555 static rtx
2556 canon_reg (x, insn)
2557 rtx x;
2558 rtx insn;
2559 {
2560 register int i;
2561 register enum rtx_code code;
2562 register char *fmt;
2563
2564 if (x == 0)
2565 return x;
2566
2567 code = GET_CODE (x);
2568 switch (code)
2569 {
2570 case PC:
2571 case CC0:
2572 case CONST:
2573 case CONST_INT:
2574 case CONST_DOUBLE:
2575 case SYMBOL_REF:
2576 case LABEL_REF:
2577 case ADDR_VEC:
2578 case ADDR_DIFF_VEC:
2579 return x;
2580
2581 case REG:
2582 {
2583 register int first;
2584
2585 /* Never replace a hard reg, because hard regs can appear
2586 in more than one machine mode, and we must preserve the mode
2587 of each occurrence. Also, some hard regs appear in
2588 MEMs that are shared and mustn't be altered. Don't try to
2589 replace any reg that maps to a reg of class NO_REGS. */
2590 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2591 || ! REGNO_QTY_VALID_P (REGNO (x)))
2592 return x;
2593
2594 first = qty_first_reg[reg_qty[REGNO (x)]];
2595 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2596 : REGNO_REG_CLASS (first) == NO_REGS ? x
2597 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2598 }
2599 }
2600
2601 fmt = GET_RTX_FORMAT (code);
2602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2603 {
2604 register int j;
2605
2606 if (fmt[i] == 'e')
2607 {
2608 rtx new = canon_reg (XEXP (x, i), insn);
2609 int insn_code;
2610
2611 /* If replacing pseudo with hard reg or vice versa, ensure the
2612 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2613 if (insn != 0 && new != 0
2614 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2615 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2616 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2617 || (insn_code = recog_memoized (insn)) < 0
2618 || insn_n_dups[insn_code] > 0))
2619 validate_change (insn, &XEXP (x, i), new, 1);
2620 else
2621 XEXP (x, i) = new;
2622 }
2623 else if (fmt[i] == 'E')
2624 for (j = 0; j < XVECLEN (x, i); j++)
2625 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2626 }
2627
2628 return x;
2629 }
2630 \f
2631 /* LOC is a location within INSN that is an operand address (the contents of
2632 a MEM). Find the best equivalent address to use that is valid for this
2633 insn.
2634
2635 On most CISC machines, complicated address modes are costly, and rtx_cost
2636 is a good approximation for that cost. However, most RISC machines have
2637 only a few (usually only one) memory reference formats. If an address is
2638 valid at all, it is often just as cheap as any other address. Hence, for
2639 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2640 costs of various addresses. For two addresses of equal cost, choose the one
2641 with the highest `rtx_cost' value as that has the potential of eliminating
2642 the most insns. For equal costs, we choose the first in the equivalence
2643 class. Note that we ignore the fact that pseudo registers are cheaper
2644 than hard registers here because we would also prefer the pseudo registers.
2645 */
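
/* Schematically, when ADDRESS_COST is defined the preference is:
   minimize ADDRESS_COST (x); break ties by the larger rtx_cost, which
   the code below recovers from the stored cost as (p->cost + 1) >> 1;
   remaining ties go to the earliest entry in the class.  */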
2646
2647 static void
2648 find_best_addr (insn, loc)
2649 rtx insn;
2650 rtx *loc;
2651 {
2652 struct table_elt *elt, *p;
2653 rtx addr = *loc;
2654 int our_cost;
2655 int found_better = 1;
2656 int save_do_not_record = do_not_record;
2657 int save_hash_arg_in_memory = hash_arg_in_memory;
2658 int save_hash_arg_in_struct = hash_arg_in_struct;
2659 int addr_volatile;
2660 int regno;
2661 unsigned hash;
2662
2663 /* Do not try to replace constant addresses or addresses of local and
2664 argument slots. These MEM expressions are made only once and inserted
2665 in many instructions, as well as being used to control symbol table
2666 output. It is not safe to clobber them.
2667
2668 There are some uncommon cases where the address is already in a register
2669 for some reason, but we cannot take advantage of that because we have
2670 no easy way to unshare the MEM. In addition, looking up all stack
2671 addresses is costly. */
2672 if ((GET_CODE (addr) == PLUS
2673 && GET_CODE (XEXP (addr, 0)) == REG
2674 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2675 && (regno = REGNO (XEXP (addr, 0)),
2676 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2677 || regno == ARG_POINTER_REGNUM))
2678 || (GET_CODE (addr) == REG
2679 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2680 || regno == HARD_FRAME_POINTER_REGNUM
2681 || regno == ARG_POINTER_REGNUM))
2682 || CONSTANT_ADDRESS_P (addr))
2683 return;
2684
2685 /* If this address is not simply a register, try to fold it. This will
2686 sometimes simplify the expression. Many simplifications
2687 will not be valid, but some, usually applying the associative rule, will
2688 be valid and produce better code. */
2689 if (GET_CODE (addr) != REG)
2690 {
2691 rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
2692
2693 if (1
2694 #ifdef ADDRESS_COST
2695 && (ADDRESS_COST (folded) < ADDRESS_COST (addr)
2696 || (ADDRESS_COST (folded) == ADDRESS_COST (addr)
2697 && rtx_cost (folded, MEM) > rtx_cost (addr, MEM)))
2698 #else
2699 && rtx_cost (folded, MEM) < rtx_cost (addr, MEM)
2700 #endif
2701 && validate_change (insn, loc, folded, 0))
2702 addr = folded;
2703 }
2704
2705 /* If this address is not in the hash table, we can't look for equivalences
2706 of the whole address. Also, ignore if volatile. */
2707
2708 do_not_record = 0;
2709 hash = HASH (addr, Pmode);
2710 addr_volatile = do_not_record;
2711 do_not_record = save_do_not_record;
2712 hash_arg_in_memory = save_hash_arg_in_memory;
2713 hash_arg_in_struct = save_hash_arg_in_struct;
2714
2715 if (addr_volatile)
2716 return;
2717
2718 elt = lookup (addr, hash, Pmode);
2719
2720 #ifndef ADDRESS_COST
2721 if (elt)
2722 {
2723 our_cost = elt->cost;
2724
2725 /* Find the lowest cost below ours that works. */
2726 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2727 if (elt->cost < our_cost
2728 && (GET_CODE (elt->exp) == REG
2729 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2730 && validate_change (insn, loc,
2731 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2732 return;
2733 }
2734 #else
2735
2736 if (elt)
2737 {
2738 /* We need to find the best (under the criteria documented above) entry
2739 in the class that is valid. We use the `flag' field to indicate
2740 choices that were invalid and iterate until we can't find a better
2741 one that hasn't already been tried. */
2742
2743 for (p = elt->first_same_value; p; p = p->next_same_value)
2744 p->flag = 0;
2745
2746 while (found_better)
2747 {
2748 int best_addr_cost = ADDRESS_COST (*loc);
2749 int best_rtx_cost = (elt->cost + 1) >> 1;
2750 struct table_elt *best_elt = elt;
2751
2752 found_better = 0;
2753 for (p = elt->first_same_value; p; p = p->next_same_value)
2754 if (! p->flag
2755 && (GET_CODE (p->exp) == REG
2756 || exp_equiv_p (p->exp, p->exp, 1, 0))
2757 && (ADDRESS_COST (p->exp) < best_addr_cost
2758 || (ADDRESS_COST (p->exp) == best_addr_cost
2759 && (p->cost + 1) >> 1 > best_rtx_cost)))
2760 {
2761 found_better = 1;
2762 best_addr_cost = ADDRESS_COST (p->exp);
2763 best_rtx_cost = (p->cost + 1) >> 1;
2764 best_elt = p;
2765 }
2766
2767 if (found_better)
2768 {
2769 if (validate_change (insn, loc,
2770 canon_reg (copy_rtx (best_elt->exp),
2771 NULL_RTX), 0))
2772 return;
2773 else
2774 best_elt->flag = 1;
2775 }
2776 }
2777 }
2778
2779 /* If the address is a binary operation with the first operand a register
2780 and the second a constant, do the same as above, but looking for
2781 equivalences of the register. Then try to simplify before checking for
2782 the best address to use. This catches a few cases: First is when we
2783 have REG+const and the register is another REG+const. We can often merge
2784 the constants and eliminate one insn and one register. It may also be
2785 that a machine has a cheap REG+REG+const. Finally, this improves the
2786 code on the Alpha for unaligned byte stores. */
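
/* E.g. (numbers hypothetical): for *LOC == (plus (reg 100) (const_int 8))
   where (reg 100) is known to equal (plus (reg 101) (const_int 4)),
   cse_gen_binary below can produce (plus (reg 101) (const_int 12)),
   merging the two additions into one.  */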
2787
2788 if (flag_expensive_optimizations
2789 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2790 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2791 && GET_CODE (XEXP (*loc, 0)) == REG
2792 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2793 {
2794 rtx c = XEXP (*loc, 1);
2795
2796 do_not_record = 0;
2797 hash = HASH (XEXP (*loc, 0), Pmode);
2798 do_not_record = save_do_not_record;
2799 hash_arg_in_memory = save_hash_arg_in_memory;
2800 hash_arg_in_struct = save_hash_arg_in_struct;
2801
2802 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2803 if (elt == 0)
2804 return;
2805
2806 /* We need to find the best (under the criteria documented above) entry
2807 in the class that is valid. We use the `flag' field to indicate
2808 choices that were invalid and iterate until we can't find a better
2809 one that hasn't already been tried. */
2810
2811 for (p = elt->first_same_value; p; p = p->next_same_value)
2812 p->flag = 0;
2813
2814 while (found_better)
2815 {
2816 int best_addr_cost = ADDRESS_COST (*loc);
2817 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2818 struct table_elt *best_elt = elt;
2819 rtx best_rtx = *loc;
2820 int count;
2821
2822 /* This is at worst an O(n^2) algorithm, so limit our search
2823 to the first 32 elements on the list. This avoids trouble
2824 compiling code with very long basic blocks that can easily
2825 call cse_gen_binary so many times that we run out of memory. */
2826
2827 found_better = 0;
2828 for (p = elt->first_same_value, count = 0;
2829 p && count < 32;
2830 p = p->next_same_value, count++)
2831 if (! p->flag
2832 && (GET_CODE (p->exp) == REG
2833 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2834 {
2835 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2836
2837 if ((ADDRESS_COST (new) < best_addr_cost
2838 || (ADDRESS_COST (new) == best_addr_cost
2839 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2840 {
2841 found_better = 1;
2842 best_addr_cost = ADDRESS_COST (new);
2843 best_rtx_cost = (COST (new) + 1) >> 1;
2844 best_elt = p;
2845 best_rtx = new;
2846 }
2847 }
2848
2849 if (found_better)
2850 {
2851 if (validate_change (insn, loc,
2852 canon_reg (copy_rtx (best_rtx),
2853 NULL_RTX), 0))
2854 return;
2855 else
2856 best_elt->flag = 1;
2857 }
2858 }
2859 }
2860 #endif
2861 }
2862 \f
2863 /* Given an operation (CODE, *PARG1, *PARG2), where CODE is a comparison
2864 operation (EQ, NE, GT, etc.), follow it back through the hash table to
2865 find what values are being compared.
2866
2867 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2868 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2869 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2870 compared to produce cc0.
2871
2872 The return value is the comparison operator and is either CODE itself
2873 or the code corresponding to the inverse of the comparison. */
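
/* For example (register number hypothetical): on a CC0 machine where
   cc0 was set by

	(set (cc0) (compare (reg:SI 100) (const_int 7)))

   a caller examining (ne (cc0) (const_int 0)) can end up with
   *PARG1 = (reg:SI 100), *PARG2 = (const_int 7), and NE returned.  */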
2874
2875 static enum rtx_code
2876 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2877 enum rtx_code code;
2878 rtx *parg1, *parg2;
2879 enum machine_mode *pmode1, *pmode2;
2880 {
2881 rtx arg1, arg2;
2882
2883 arg1 = *parg1, arg2 = *parg2;
2884
2885 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2886
2887 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2888 {
2889 /* Set non-zero when we find something of interest. */
2890 rtx x = 0;
2891 int reverse_code = 0;
2892 struct table_elt *p = 0;
2893
2894 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2895 On machines with CC0, this is the only case that can occur, since
2896 fold_rtx will return the COMPARE or item being compared with zero
2897 when given CC0. */
2898
2899 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2900 x = arg1;
2901
2902 /* If ARG1 is a comparison operator and CODE is testing for
2903 STORE_FLAG_VALUE, get the inner arguments. */
2904
2905 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2906 {
2907 if (code == NE
2908 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2909 && code == LT && STORE_FLAG_VALUE == -1)
2910 #ifdef FLOAT_STORE_FLAG_VALUE
2911 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2912 && FLOAT_STORE_FLAG_VALUE < 0)
2913 #endif
2914 )
2915 x = arg1;
2916 else if (code == EQ
2917 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2918 && code == GE && STORE_FLAG_VALUE == -1)
2919 #ifdef FLOAT_STORE_FLAG_VALUE
2920 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2921 && FLOAT_STORE_FLAG_VALUE < 0)
2922 #endif
2923 )
2924 x = arg1, reverse_code = 1;
2925 }
2926
2927 /* ??? We could also check for
2928
2929 (ne (and (eq (...) (const_int 1))) (const_int 0))
2930
2931 and related forms, but let's wait until we see them occurring. */
2932
2933 if (x == 0)
2934 /* Look up ARG1 in the hash table and see if it has an equivalence
2935 that lets us see what is being compared. */
2936 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2937 GET_MODE (arg1));
2938 if (p) p = p->first_same_value;
2939
2940 for (; p; p = p->next_same_value)
2941 {
2942 enum machine_mode inner_mode = GET_MODE (p->exp);
2943
2944 /* If the entry isn't valid, skip it. */
2945 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2946 continue;
2947
2948 if (GET_CODE (p->exp) == COMPARE
2949 /* Another possibility is that this machine has a compare insn
2950 that includes the comparison code. In that case, ARG1 would
2951 be equivalent to a comparison operation that would set ARG1 to
2952 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2953 the code of P->EXP is the actual comparison being done; if it is
2954 an EQ, we must reverse that code. On machines with a negative
2955 value for STORE_FLAG_VALUE, also look at LT and GE operations. */
2956 || ((code == NE
2957 || (code == LT
2958 && GET_MODE_CLASS (inner_mode) == MODE_INT
2959 && (GET_MODE_BITSIZE (inner_mode)
2960 <= HOST_BITS_PER_WIDE_INT)
2961 && (STORE_FLAG_VALUE
2962 & ((HOST_WIDE_INT) 1
2963 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2964 #ifdef FLOAT_STORE_FLAG_VALUE
2965 || (code == LT
2966 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2967 && FLOAT_STORE_FLAG_VALUE < 0)
2968 #endif
2969 )
2970 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2971 {
2972 x = p->exp;
2973 break;
2974 }
2975 else if ((code == EQ
2976 || (code == GE
2977 && GET_MODE_CLASS (inner_mode) == MODE_INT
2978 && (GET_MODE_BITSIZE (inner_mode)
2979 <= HOST_BITS_PER_WIDE_INT)
2980 && (STORE_FLAG_VALUE
2981 & ((HOST_WIDE_INT) 1
2982 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2983 #ifdef FLOAT_STORE_FLAG_VALUE
2984 || (code == GE
2985 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2986 && FLOAT_STORE_FLAG_VALUE < 0)
2987 #endif
2988 )
2989 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2990 {
2991 reverse_code = 1;
2992 x = p->exp;
2993 break;
2994 }
2995
2996 /* If this is fp + constant, the equivalent is a better operand since
2997 it may let us predict the value of the comparison. */
2998 else if (NONZERO_BASE_PLUS_P (p->exp))
2999 {
3000 arg1 = p->exp;
3001 continue;
3002 }
3003 }
3004
3005 /* If we didn't find a useful equivalence for ARG1, we are done.
3006 Otherwise, set up for the next iteration. */
3007 if (x == 0)
3008 break;
3009
3010 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
3011 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
3012 code = GET_CODE (x);
3013
3014 if (reverse_code)
3015 code = reverse_condition (code);
3016 }
3017
3018 /* Return our results. Return the modes from before fold_rtx
3019 because fold_rtx might produce const_int, and then it's too late. */
3020 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
3021 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
3022
3023 return code;
3024 }
3025 \f
3026 /* Try to simplify a unary operation CODE whose output mode is to be
3027 MODE with input operand OP whose mode was originally OP_MODE.
3028 Return zero if no simplification can be made. */
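
/* Some typical foldings (constants illustrative):

	(neg:SI (const_int 5))		=> (const_int -5)
	(zero_extend:SI (const_int -1))	=> (const_int 255)
					   when OP_MODE is QImode
	(not:SI (not:SI X))		=> X, even for non-constant X.  */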
3029
3030 rtx
3031 simplify_unary_operation (code, mode, op, op_mode)
3032 enum rtx_code code;
3033 enum machine_mode mode;
3034 rtx op;
3035 enum machine_mode op_mode;
3036 {
3037 register int width = GET_MODE_BITSIZE (mode);
3038
3039 /* The order of these tests is critical so that, for example, we don't
3040 check the wrong mode (input vs. output) for a conversion operation,
3041 such as FIX. At some point, this should be simplified. */
3042
3043 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
3044
3045 if (code == FLOAT && GET_MODE (op) == VOIDmode
3046 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3047 {
3048 HOST_WIDE_INT hv, lv;
3049 REAL_VALUE_TYPE d;
3050
3051 if (GET_CODE (op) == CONST_INT)
3052 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3053 else
3054 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3055
3056 #ifdef REAL_ARITHMETIC
3057 REAL_VALUE_FROM_INT (d, lv, hv, mode);
3058 #else
3059 if (hv < 0)
3060 {
3061 d = (double) (~ hv);
3062 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3063 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3064 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3065 d = (- d - 1.0);
3066 }
3067 else
3068 {
3069 d = (double) hv;
3070 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3071 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3072 d += (double) (unsigned HOST_WIDE_INT) lv;
3073 }
3074 #endif /* REAL_ARITHMETIC */
3075 d = real_value_truncate (mode, d);
3076 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3077 }
3078 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3079 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3080 {
3081 HOST_WIDE_INT hv, lv;
3082 REAL_VALUE_TYPE d;
3083
3084 if (GET_CODE (op) == CONST_INT)
3085 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3086 else
3087 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3088
3089 if (op_mode == VOIDmode)
3090 {
3091 /* We don't know how to interpret negative-looking numbers in
3092 this case, so don't try to fold those. */
3093 if (hv < 0)
3094 return 0;
3095 }
3096 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3097 ;
3098 else
3099 hv = 0, lv &= GET_MODE_MASK (op_mode);
3100
3101 #ifdef REAL_ARITHMETIC
3102 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
3103 #else
3104
3105 d = (double) (unsigned HOST_WIDE_INT) hv;
3106 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3107 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3108 d += (double) (unsigned HOST_WIDE_INT) lv;
3109 #endif /* REAL_ARITHMETIC */
3110 d = real_value_truncate (mode, d);
3111 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3112 }
3113 #endif
3114
3115 if (GET_CODE (op) == CONST_INT
3116 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3117 {
3118 register HOST_WIDE_INT arg0 = INTVAL (op);
3119 register HOST_WIDE_INT val;
3120
3121 switch (code)
3122 {
3123 case NOT:
3124 val = ~ arg0;
3125 break;
3126
3127 case NEG:
3128 val = - arg0;
3129 break;
3130
3131 case ABS:
3132 val = (arg0 >= 0 ? arg0 : - arg0);
3133 break;
3134
3135 case FFS:
3136 /* Don't use ffs here. Instead, get the low-order bit and then its
3137 number. If arg0 is zero, this will return 0, as desired. */
3138 arg0 &= GET_MODE_MASK (mode);
3139 val = exact_log2 (arg0 & (- arg0)) + 1;
3140 break;
3141
3142 case TRUNCATE:
3143 val = arg0;
3144 break;
3145
3146 case ZERO_EXTEND:
3147 if (op_mode == VOIDmode)
3148 op_mode = mode;
3149 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3150 {
3151 /* If we were really extending the mode,
3152 we would have to distinguish between zero-extension
3153 and sign-extension. */
3154 if (width != GET_MODE_BITSIZE (op_mode))
3155 abort ();
3156 val = arg0;
3157 }
3158 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3159 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3160 else
3161 return 0;
3162 break;
3163
3164 case SIGN_EXTEND:
3165 if (op_mode == VOIDmode)
3166 op_mode = mode;
3167 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3168 {
3169 /* If we were really extending the mode,
3170 we would have to distinguish between zero-extension
3171 and sign-extension. */
3172 if (width != GET_MODE_BITSIZE (op_mode))
3173 abort ();
3174 val = arg0;
3175 }
3176 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3177 {
3178 val
3179 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3180 if (val
3181 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3182 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3183 }
3184 else
3185 return 0;
3186 break;
3187
3188 case SQRT:
3189 return 0;
3190
3191 default:
3192 abort ();
3193 }
3194
3195 /* Clear the bits that don't belong in our mode,
3196 unless they and our sign bit are all one.
3197 So we get either a reasonable negative value or a reasonable
3198 unsigned value for this mode. */
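/* E.g. with an 8-bit mode: VAL == 511 (0x1ff) is masked down to 255,
   while VAL == -1 is left alone, since its excess high bits agree
   with its sign bit.  */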
3199 if (width < HOST_BITS_PER_WIDE_INT
3200 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3201 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3202 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3203
3204 return GEN_INT (val);
3205 }
3206
3207 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3208 for a DImode operation on a CONST_INT. */
3209 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
3210 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3211 {
3212 HOST_WIDE_INT l1, h1, lv, hv;
3213
3214 if (GET_CODE (op) == CONST_DOUBLE)
3215 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3216 else
3217 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3218
3219 switch (code)
3220 {
3221 case NOT:
3222 lv = ~ l1;
3223 hv = ~ h1;
3224 break;
3225
3226 case NEG:
3227 neg_double (l1, h1, &lv, &hv);
3228 break;
3229
3230 case ABS:
3231 if (h1 < 0)
3232 neg_double (l1, h1, &lv, &hv);
3233 else
3234 lv = l1, hv = h1;
3235 break;
3236
3237 case FFS:
3238 hv = 0;
3239 if (l1 == 0)
3240 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3241 else
3242 lv = exact_log2 (l1 & (-l1)) + 1;
3243 break;
3244
3245 case TRUNCATE:
3246 /* This is just a change-of-mode, so do nothing. */
3247 lv = l1, hv = h1;
3248 break;
3249
3250 case ZERO_EXTEND:
3251 if (op_mode == VOIDmode
3252 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3253 return 0;
3254
3255 hv = 0;
3256 lv = l1 & GET_MODE_MASK (op_mode);
3257 break;
3258
3259 case SIGN_EXTEND:
3260 if (op_mode == VOIDmode
3261 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3262 return 0;
3263 else
3264 {
3265 lv = l1 & GET_MODE_MASK (op_mode);
3266 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3267 && (lv & ((HOST_WIDE_INT) 1
3268 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3269 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3270
3271 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3272 }
3273 break;
3274
3275 case SQRT:
3276 return 0;
3277
3278 default:
3279 return 0;
3280 }
3281
3282 return immed_double_const (lv, hv, mode);
3283 }
3284
3285 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3286 else if (GET_CODE (op) == CONST_DOUBLE
3287 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3288 {
3289 REAL_VALUE_TYPE d;
3290 jmp_buf handler;
3291 rtx x;
3292
3293 if (setjmp (handler))
3294 /* There used to be a warning here, but that is inadvisable.
3295 People may want to cause traps, and the natural way
3296 to do it should not get a warning. */
3297 return 0;
3298
3299 set_float_handler (handler);
3300
3301 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3302
3303 switch (code)
3304 {
3305 case NEG:
3306 d = REAL_VALUE_NEGATE (d);
3307 break;
3308
3309 case ABS:
3310 if (REAL_VALUE_NEGATIVE (d))
3311 d = REAL_VALUE_NEGATE (d);
3312 break;
3313
3314 case FLOAT_TRUNCATE:
3315 d = real_value_truncate (mode, d);
3316 break;
3317
3318 case FLOAT_EXTEND:
3319 /* All this does is change the mode. */
3320 break;
3321
3322 case FIX:
3323 d = REAL_VALUE_RNDZINT (d);
3324 break;
3325
3326 case UNSIGNED_FIX:
3327 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3328 break;
3329
3330 case SQRT:
3331 return 0;
3332
3333 default:
3334 abort ();
3335 }
3336
3337 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3338 set_float_handler (NULL_PTR);
3339 return x;
3340 }
3341
3342 else if (GET_CODE (op) == CONST_DOUBLE
3343 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3344 && GET_MODE_CLASS (mode) == MODE_INT
3345 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3346 {
3347 REAL_VALUE_TYPE d;
3348 jmp_buf handler;
3349 HOST_WIDE_INT val;
3350
3351 if (setjmp (handler))
3352 return 0;
3353
3354 set_float_handler (handler);
3355
3356 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3357
3358 switch (code)
3359 {
3360 case FIX:
3361 val = REAL_VALUE_FIX (d);
3362 break;
3363
3364 case UNSIGNED_FIX:
3365 val = REAL_VALUE_UNSIGNED_FIX (d);
3366 break;
3367
3368 default:
3369 abort ();
3370 }
3371
3372 set_float_handler (NULL_PTR);
3373
3374 /* Clear the bits that don't belong in our mode,
3375 unless they and our sign bit are all one.
3376 So we get either a reasonable negative value or a reasonable
3377 unsigned value for this mode. */
3378 if (width < HOST_BITS_PER_WIDE_INT
3379 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3380 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3381 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3382
3383 /* If this would be an entire word for the target, but is not for
3384 the host, then sign-extend on the host so that the number will look
3385 the same on the host as it would on the target.
3386
3387 For example, when building a 32 bit sparc targeted compiler hosted
3388 on a 64 bit alpha, we want the 32 bit unsigned value -1 to be
3389 represented as the 64 bit value -1, and not as 0x00000000ffffffff.
3390 The latter confuses the sparc backend. */
3391
3392 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
3393 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
3394 val |= ((HOST_WIDE_INT) (-1) << width);
3395
3396 return GEN_INT (val);
3397 }
3398 #endif
3399 /* This was formerly used only for non-IEEE float.
3400 eggert@twinsun.com says it is safe for IEEE also. */
3401 else
3402 {
3403 /* There are some simplifications we can do even if the operands
3404 aren't constant. */
3405 switch (code)
3406 {
3407 case NEG:
3408 case NOT:
3409 /* (not (not X)) == X, similarly for NEG. */
3410 if (GET_CODE (op) == code)
3411 return XEXP (op, 0);
3412 break;
3413
3414 case SIGN_EXTEND:
3415 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3416 becomes just the MINUS if its mode is MODE. This allows
3417 folding switch statements on machines using casesi (such as
3418 the Vax). */
3419 if (GET_CODE (op) == TRUNCATE
3420 && GET_MODE (XEXP (op, 0)) == mode
3421 && GET_CODE (XEXP (op, 0)) == MINUS
3422 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3423 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3424 return XEXP (op, 0);
3425
3426 #ifdef POINTERS_EXTEND_UNSIGNED
3427 if (! POINTERS_EXTEND_UNSIGNED
3428 && mode == Pmode && GET_MODE (op) == ptr_mode
3429 && CONSTANT_P (op))
3430 return convert_memory_address (Pmode, op);
3431 #endif
3432 break;
3433
3434 #ifdef POINTERS_EXTEND_UNSIGNED
3435 case ZERO_EXTEND:
3436 if (POINTERS_EXTEND_UNSIGNED
3437 && mode == Pmode && GET_MODE (op) == ptr_mode
3438 && CONSTANT_P (op))
3439 return convert_memory_address (Pmode, op);
3440 break;
3441 #endif
3442 }
3443
3444 return 0;
3445 }
3446 }
3447 \f
3448 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3449 and OP1. Return 0 if no simplification is possible.
3450
3451 Don't use this for relational operations such as EQ or LT.
3452 Use simplify_relational_operation instead. */
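
/* Typical foldings (constants illustrative):

	(plus:SI (const_int 3) (const_int 4))	=> (const_int 7)
	(plus:DF X (const_double 0.0))		=> X, unless IEEE semantics
						   forbid it (see below)
	(and:DI C1 C2), both CONST_DOUBLE	=> folded piecewise on the
						   low and high words.  */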
3453
3454 rtx
3455 simplify_binary_operation (code, mode, op0, op1)
3456 enum rtx_code code;
3457 enum machine_mode mode;
3458 rtx op0, op1;
3459 {
3460 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3461 HOST_WIDE_INT val;
3462 int width = GET_MODE_BITSIZE (mode);
3463 rtx tem;
3464
3465 /* Relational operations don't work here. We must know the mode
3466 of the operands in order to do the comparison correctly.
3467 Assuming a full word can give incorrect results.
3468 Consider comparing 128 with -128 in QImode. */
3469
3470 if (GET_RTX_CLASS (code) == '<')
3471 abort ();
3472
3473 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3474 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3475 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3476 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3477 {
3478 REAL_VALUE_TYPE f0, f1, value;
3479 jmp_buf handler;
3480
3481 if (setjmp (handler))
3482 return 0;
3483
3484 set_float_handler (handler);
3485
3486 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3487 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3488 f0 = real_value_truncate (mode, f0);
3489 f1 = real_value_truncate (mode, f1);
3490
3491 #ifdef REAL_ARITHMETIC
3492 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3493 #else
3494 switch (code)
3495 {
3496 case PLUS:
3497 value = f0 + f1;
3498 break;
3499 case MINUS:
3500 value = f0 - f1;
3501 break;
3502 case MULT:
3503 value = f0 * f1;
3504 break;
3505 case DIV:
3506 #ifndef REAL_INFINITY
3507 if (f1 == 0)
3508 return 0;
3509 #endif
3510 value = f0 / f1;
3511 break;
3512 case SMIN:
3513 value = MIN (f0, f1);
3514 break;
3515 case SMAX:
3516 value = MAX (f0, f1);
3517 break;
3518 default:
3519 abort ();
3520 }
3521 #endif
3522
3523 value = real_value_truncate (mode, value);
3524 set_float_handler (NULL_PTR);
3525 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3526 }
3527 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3528
3529 /* We can fold some multi-word operations. */
3530 if (GET_MODE_CLASS (mode) == MODE_INT
3531 && width == HOST_BITS_PER_WIDE_INT * 2
3532 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3533 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3534 {
3535 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3536
3537 if (GET_CODE (op0) == CONST_DOUBLE)
3538 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3539 else
3540 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3541
3542 if (GET_CODE (op1) == CONST_DOUBLE)
3543 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3544 else
3545 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3546
3547 switch (code)
3548 {
3549 case MINUS:
3550 /* A - B == A + (-B). */
3551 neg_double (l2, h2, &lv, &hv);
3552 l2 = lv, h2 = hv;
3553
3554 /* ... fall through ... */
3555
3556 case PLUS:
3557 add_double (l1, h1, l2, h2, &lv, &hv);
3558 break;
3559
3560 case MULT:
3561 mul_double (l1, h1, l2, h2, &lv, &hv);
3562 break;
3563
3564 case DIV: case MOD: case UDIV: case UMOD:
3565 /* We'd need to include tree.h to do this and it doesn't seem worth
3566 it. */
3567 return 0;
3568
3569 case AND:
3570 lv = l1 & l2, hv = h1 & h2;
3571 break;
3572
3573 case IOR:
3574 lv = l1 | l2, hv = h1 | h2;
3575 break;
3576
3577 case XOR:
3578 lv = l1 ^ l2, hv = h1 ^ h2;
3579 break;
3580
3581 case SMIN:
3582 if (h1 < h2
3583 || (h1 == h2
3584 && ((unsigned HOST_WIDE_INT) l1
3585 < (unsigned HOST_WIDE_INT) l2)))
3586 lv = l1, hv = h1;
3587 else
3588 lv = l2, hv = h2;
3589 break;
3590
3591 case SMAX:
3592 if (h1 > h2
3593 || (h1 == h2
3594 && ((unsigned HOST_WIDE_INT) l1
3595 > (unsigned HOST_WIDE_INT) l2)))
3596 lv = l1, hv = h1;
3597 else
3598 lv = l2, hv = h2;
3599 break;
3600
3601 case UMIN:
3602 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3603 || (h1 == h2
3604 && ((unsigned HOST_WIDE_INT) l1
3605 < (unsigned HOST_WIDE_INT) l2)))
3606 lv = l1, hv = h1;
3607 else
3608 lv = l2, hv = h2;
3609 break;
3610
3611 case UMAX:
3612 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3613 || (h1 == h2
3614 && ((unsigned HOST_WIDE_INT) l1
3615 > (unsigned HOST_WIDE_INT) l2)))
3616 lv = l1, hv = h1;
3617 else
3618 lv = l2, hv = h2;
3619 break;
3620
3621 case LSHIFTRT: case ASHIFTRT:
3622 case ASHIFT:
3623 case ROTATE: case ROTATERT:
3624 #ifdef SHIFT_COUNT_TRUNCATED
3625 if (SHIFT_COUNT_TRUNCATED)
3626 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3627 #endif
3628
3629 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3630 return 0;
3631
3632 if (code == LSHIFTRT || code == ASHIFTRT)
3633 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3634 code == ASHIFTRT);
3635 else if (code == ASHIFT)
3636 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3637 else if (code == ROTATE)
3638 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3639 else /* code == ROTATERT */
3640 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3641 break;
3642
3643 default:
3644 return 0;
3645 }
3646
3647 return immed_double_const (lv, hv, mode);
3648 }
3649
3650 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3651 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3652 {
3653 /* Even if we can't compute a constant result,
3654 there are some cases worth simplifying. */
3655
3656 switch (code)
3657 {
3658 case PLUS:
3659 /* In IEEE floating point, x+0 is not the same as x. Similarly
3660 for the other optimizations below. */
3661 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3662 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3663 break;
3664
3665 if (op1 == CONST0_RTX (mode))
3666 return op0;
3667
3668 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3669 if (GET_CODE (op0) == NEG)
3670 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3671 else if (GET_CODE (op1) == NEG)
3672 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3673
3674 /* Handle both-operands-constant cases. We can only add
3675 CONST_INTs to constants since the sum of relocatable symbols
3676 can't be handled by most assemblers. Don't add CONST_INT
3677 to CONST_INT since overflow won't be computed properly if wider
3678 than HOST_BITS_PER_WIDE_INT. */
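/* For instance, adding (const_int 8) to
   (const:SI (plus:SI (symbol_ref "x") (const_int 4))) goes through
   plus_constant and yields
   (const:SI (plus:SI (symbol_ref "x") (const_int 12))); the symbol
   name is only illustrative. */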
3679
3680 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3681 && GET_CODE (op1) == CONST_INT)
3682 return plus_constant (op0, INTVAL (op1));
3683 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3684 && GET_CODE (op0) == CONST_INT)
3685 return plus_constant (op1, INTVAL (op0));
3686
3687 /* See if this is something like X * C + X or vice versa or
3688 if the multiplication is written as a shift. If so, we can
3689 distribute and make a new multiply or shift, since
3690 X * C + X is X * (C + 1). But don't make a
3691 real multiply if we didn't have one before. */
3692
3693 if (! FLOAT_MODE_P (mode))
3694 {
3695 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3696 rtx lhs = op0, rhs = op1;
3697 int had_mult = 0;
3698
3699 if (GET_CODE (lhs) == NEG)
3700 coeff0 = -1, lhs = XEXP (lhs, 0);
3701 else if (GET_CODE (lhs) == MULT
3702 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3703 {
3704 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3705 had_mult = 1;
3706 }
3707 else if (GET_CODE (lhs) == ASHIFT
3708 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3709 && INTVAL (XEXP (lhs, 1)) >= 0
3710 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3711 {
3712 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3713 lhs = XEXP (lhs, 0);
3714 }
3715
3716 if (GET_CODE (rhs) == NEG)
3717 coeff1 = -1, rhs = XEXP (rhs, 0);
3718 else if (GET_CODE (rhs) == MULT
3719 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3720 {
3721 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3722 had_mult = 1;
3723 }
3724 else if (GET_CODE (rhs) == ASHIFT
3725 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3726 && INTVAL (XEXP (rhs, 1)) >= 0
3727 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3728 {
3729 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3730 rhs = XEXP (rhs, 0);
3731 }
3732
3733 if (rtx_equal_p (lhs, rhs))
3734 {
3735 tem = cse_gen_binary (MULT, mode, lhs,
3736 GEN_INT (coeff0 + coeff1));
3737 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3738 }
3739 }
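/* Worked example: (plus:SI (mult:SI r (const_int 3)) r) has
   coefficients 3 and 1, so it becomes r * 4, which the recursive
   simplification turns into (ashift:SI r (const_int 2)) once RTL
   generation has finished. By contrast,
   (plus:SI (ashift:SI r (const_int 1)) r) would need a real
   multiply by 3 that was not there before, so it is left alone. */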
3740
3741 /* If one of the operands is a PLUS or a MINUS, see if we can
3742 simplify this by the associative law.
3743 Don't use the associative law for floating point.
3744 The inaccuracy makes it nonassociative,
3745 and subtle programs can break if operations are associated. */
3746
3747 if (INTEGRAL_MODE_P (mode)
3748 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3749 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3750 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3751 return tem;
3752 break;
3753
3754 case COMPARE:
3755 #ifdef HAVE_cc0
3756 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3757 using cc0, in which case we want to leave it as a COMPARE
3758 so we can distinguish it from a register-register-copy.
3759
3760 In IEEE floating point, x-0 is not the same as x. */
3761
3762 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3763 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3764 && op1 == CONST0_RTX (mode))
3765 return op0;
3766 #else
3767 /* Do nothing here. */
3768 #endif
3769 break;
3770
3771 case MINUS:
3772 /* None of these optimizations can be done for IEEE
3773 floating point. */
3774 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3775 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3776 break;
3777
3778 /* We can't assume x-x is 0 even with non-IEEE floating point,
3779 but since it is zero except in very strange circumstances, we
3780 will treat it as zero with -ffast-math. */
3781 if (rtx_equal_p (op0, op1)
3782 && ! side_effects_p (op0)
3783 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3784 return CONST0_RTX (mode);
3785
3786 /* Change subtraction from zero into negation. */
3787 if (op0 == CONST0_RTX (mode))
3788 return gen_rtx (NEG, mode, op1);
3789
3790 /* (-1 - a) is ~a. */
3791 if (op0 == constm1_rtx)
3792 return gen_rtx (NOT, mode, op1);
3793
3794 /* Subtracting 0 has no effect. */
3795 if (op1 == CONST0_RTX (mode))
3796 return op0;
3797
3798 /* See if this is something like X * C - X or vice versa or
3799 if the multiplication is written as a shift. If so, we can
3800 distribute and make a new multiply, shift, or maybe just
3801 have X (if C is 2 in the example above). But don't make
3802 real multiply if we didn't have one before. */
3803
3804 if (! FLOAT_MODE_P (mode))
3805 {
3806 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3807 rtx lhs = op0, rhs = op1;
3808 int had_mult = 0;
3809
3810 if (GET_CODE (lhs) == NEG)
3811 coeff0 = -1, lhs = XEXP (lhs, 0);
3812 else if (GET_CODE (lhs) == MULT
3813 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3814 {
3815 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3816 had_mult = 1;
3817 }
3818 else if (GET_CODE (lhs) == ASHIFT
3819 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3820 && INTVAL (XEXP (lhs, 1)) >= 0
3821 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3822 {
3823 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3824 lhs = XEXP (lhs, 0);
3825 }
3826
3827 if (GET_CODE (rhs) == NEG)
3828 coeff1 = -1, rhs = XEXP (rhs, 0);
3829 else if (GET_CODE (rhs) == MULT
3830 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3831 {
3832 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3833 had_mult = 1;
3834 }
3835 else if (GET_CODE (rhs) == ASHIFT
3836 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3837 && INTVAL (XEXP (rhs, 1)) >= 0
3838 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3839 {
3840 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3841 rhs = XEXP (rhs, 0);
3842 }
3843
3844 if (rtx_equal_p (lhs, rhs))
3845 {
3846 tem = cse_gen_binary (MULT, mode, lhs,
3847 GEN_INT (coeff0 - coeff1));
3848 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3849 }
3850 }
3851
3852 /* (a - (-b)) -> (a + b). */
3853 if (GET_CODE (op1) == NEG)
3854 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3855
3856 /* If one of the operands is a PLUS or a MINUS, see if we can
3857 simplify this by the associative law.
3858 Don't use the associative law for floating point.
3859 The inaccuracy makes it nonassociative,
3860 and subtle programs can break if operations are associated. */
3861
3862 if (INTEGRAL_MODE_P (mode)
3863 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3864 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3865 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3866 return tem;
3867
3868 /* Don't let a relocatable value get a negative coeff. */
3869 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3870 return plus_constant (op0, - INTVAL (op1));
3871
3872 /* (x - (x & y)) -> (x & ~y) */
3873 if (GET_CODE (op1) == AND)
3874 {
3875 if (rtx_equal_p (op0, XEXP (op1, 0)))
3876 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3877 if (rtx_equal_p (op0, XEXP (op1, 1)))
3878 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3879 }
3880 break;
3881
3882 case MULT:
3883 if (op1 == constm1_rtx)
3884 {
3885 tem = simplify_unary_operation (NEG, mode, op0, mode);
3886
3887 return tem ? tem : gen_rtx (NEG, mode, op0);
3888 }
3889
3890 /* In IEEE floating point, x*0 is not always 0. */
3891 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3892 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3893 && op1 == CONST0_RTX (mode)
3894 && ! side_effects_p (op0))
3895 return op1;
3896
3897 /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
3898 However, ANSI says we can drop signals,
3899 so we can do this anyway. */
3900 if (op1 == CONST1_RTX (mode))
3901 return op0;
3902
3903 /* Convert multiply by constant power of two into shift unless
3904 we are still generating RTL. This test is a kludge. */
3905 if (GET_CODE (op1) == CONST_INT
3906 && (val = exact_log2 (INTVAL (op1))) >= 0
3907 /* If the mode is larger than the host word size, and the
3908 uppermost bit is set, then this isn't a power of two due
3909 to implicit sign extension. */
3910 && (width <= HOST_BITS_PER_WIDE_INT
3911 || val != HOST_BITS_PER_WIDE_INT - 1)
3912 && ! rtx_equal_function_value_matters)
3913 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
3914
3915 if (GET_CODE (op1) == CONST_DOUBLE
3916 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3917 {
3918 REAL_VALUE_TYPE d;
3919 jmp_buf handler;
3920 int op1is2, op1ism1;
3921
3922 if (setjmp (handler))
3923 return 0;
3924
3925 set_float_handler (handler);
3926 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3927 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3928 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3929 set_float_handler (NULL_PTR);
3930
3931 /* x*2 is x+x and x*(-1) is -x */
3932 if (op1is2 && GET_MODE (op0) == mode)
3933 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3934
3935 else if (op1ism1 && GET_MODE (op0) == mode)
3936 return gen_rtx (NEG, mode, op0);
3937 }
3938 break;
3939
3940 case IOR:
3941 if (op1 == const0_rtx)
3942 return op0;
3943 if (GET_CODE (op1) == CONST_INT
3944 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3945 return op1;
3946 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3947 return op0;
3948 /* A | (~A) -> -1 */
3949 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3950 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3951 && ! side_effects_p (op0)
3952 && GET_MODE_CLASS (mode) != MODE_CC)
3953 return constm1_rtx;
3954 break;
3955
3956 case XOR:
3957 if (op1 == const0_rtx)
3958 return op0;
3959 if (GET_CODE (op1) == CONST_INT
3960 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3961 return gen_rtx (NOT, mode, op0);
3962 if (op0 == op1 && ! side_effects_p (op0)
3963 && GET_MODE_CLASS (mode) != MODE_CC)
3964 return const0_rtx;
3965 break;
3966
3967 case AND:
3968 if (op1 == const0_rtx && ! side_effects_p (op0))
3969 return const0_rtx;
3970 if (GET_CODE (op1) == CONST_INT
3971 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3972 return op0;
3973 if (op0 == op1 && ! side_effects_p (op0)
3974 && GET_MODE_CLASS (mode) != MODE_CC)
3975 return op0;
3976 /* A & (~A) -> 0 */
3977 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3978 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3979 && ! side_effects_p (op0)
3980 && GET_MODE_CLASS (mode) != MODE_CC)
3981 return const0_rtx;
3982 break;
3983
3984 case UDIV:
3985 /* Convert divide by power of two into shift (divide by 1 handled
3986 below). */
3987 if (GET_CODE (op1) == CONST_INT
3988 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3989 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3990
3991 /* ... fall through ... */
3992
3993 case DIV:
3994 if (op1 == CONST1_RTX (mode))
3995 return op0;
3996
3997 /* In IEEE floating point, 0/x is not always 0. */
3998 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3999 || ! FLOAT_MODE_P (mode) || flag_fast_math)
4000 && op0 == CONST0_RTX (mode)
4001 && ! side_effects_p (op1))
4002 return op0;
4003
4004 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4005 /* Change division by a constant into multiplication. Only do
4006 this with -ffast-math until an expert says it is safe in
4007 general. */
4008 else if (GET_CODE (op1) == CONST_DOUBLE
4009 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
4010 && op1 != CONST0_RTX (mode)
4011 && flag_fast_math)
4012 {
4013 REAL_VALUE_TYPE d;
4014 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
4015
4016 if (! REAL_VALUES_EQUAL (d, dconst0))
4017 {
4018 #if defined (REAL_ARITHMETIC)
4019 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
4020 return gen_rtx (MULT, mode, op0,
4021 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
4022 #else
4023 return gen_rtx (MULT, mode, op0,
4024 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
4025 #endif
4026 }
4027 }
4028 #endif
4029 break;
4030
4031 case UMOD:
4032 /* Handle modulus by power of two (mod with 1 handled below). */
4033 if (GET_CODE (op1) == CONST_INT
4034 && exact_log2 (INTVAL (op1)) > 0)
4035 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
4036
4037 /* ... fall through ... */
4038
4039 case MOD:
4040 if ((op0 == const0_rtx || op1 == const1_rtx)
4041 && ! side_effects_p (op0) && ! side_effects_p (op1))
4042 return const0_rtx;
4043 break;
4044
4045 case ROTATERT:
4046 case ROTATE:
4047 /* Rotating ~0 always results in ~0. */
4048 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
4049 && INTVAL (op0) == GET_MODE_MASK (mode)
4050 && ! side_effects_p (op1))
4051 return op0;
4052
4053 /* ... fall through ... */
4054
4055 case ASHIFT:
4056 case ASHIFTRT:
4057 case LSHIFTRT:
4058 if (op1 == const0_rtx)
4059 return op0;
4060 if (op0 == const0_rtx && ! side_effects_p (op1))
4061 return op0;
4062 break;
4063
4064 case SMIN:
4065 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4066 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4067 && ! side_effects_p (op0))
4068 return op1;
4069 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4070 return op0;
4071 break;
4072
4073 case SMAX:
4074 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4075 && (INTVAL (op1)
4076 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4077 && ! side_effects_p (op0))
4078 return op1;
4079 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4080 return op0;
4081 break;
4082
4083 case UMIN:
4084 if (op1 == const0_rtx && ! side_effects_p (op0))
4085 return op1;
4086 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4087 return op0;
4088 break;
4089
4090 case UMAX:
4091 if (op1 == constm1_rtx && ! side_effects_p (op0))
4092 return op1;
4093 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4094 return op0;
4095 break;
4096
4097 default:
4098 abort ();
4099 }
4100
4101 return 0;
4102 }
4103
4104 /* Get the integer argument values in two forms:
4105 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4106
4107 arg0 = INTVAL (op0);
4108 arg1 = INTVAL (op1);
4109
4110 if (width < HOST_BITS_PER_WIDE_INT)
4111 {
4112 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4113 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4114
4115 arg0s = arg0;
4116 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4117 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4118
4119 arg1s = arg1;
4120 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4121 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4122 }
4123 else
4124 {
4125 arg0s = arg0;
4126 arg1s = arg1;
4127 }
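/* Example, for QImode (width == 8): op0 == (const_int -1) gives
   arg0 == 0xff (zero-extended) and arg0s == -1 (sign-extended), so
   both the unsigned and the signed views of the operand are at
   hand. */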
4128
4129 /* Compute the value of the arithmetic. */
4130
4131 switch (code)
4132 {
4133 case PLUS:
4134 val = arg0s + arg1s;
4135 break;
4136
4137 case MINUS:
4138 val = arg0s - arg1s;
4139 break;
4140
4141 case MULT:
4142 val = arg0s * arg1s;
4143 break;
4144
4145 case DIV:
4146 if (arg1s == 0)
4147 return 0;
4148 val = arg0s / arg1s;
4149 break;
4150
4151 case MOD:
4152 if (arg1s == 0)
4153 return 0;
4154 val = arg0s % arg1s;
4155 break;
4156
4157 case UDIV:
4158 if (arg1 == 0)
4159 return 0;
4160 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4161 break;
4162
4163 case UMOD:
4164 if (arg1 == 0)
4165 return 0;
4166 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4167 break;
4168
4169 case AND:
4170 val = arg0 & arg1;
4171 break;
4172
4173 case IOR:
4174 val = arg0 | arg1;
4175 break;
4176
4177 case XOR:
4178 val = arg0 ^ arg1;
4179 break;
4180
4181 case LSHIFTRT:
4182 /* If shift count is undefined, don't fold it; let the machine do
4183 what it wants. But truncate it if the machine will do that. */
4184 if (arg1 < 0)
4185 return 0;
4186
4187 #ifdef SHIFT_COUNT_TRUNCATED
4188 if (SHIFT_COUNT_TRUNCATED)
4189 arg1 %= width;
4190 #endif
4191
4192 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4193 break;
4194
4195 case ASHIFT:
4196 if (arg1 < 0)
4197 return 0;
4198
4199 #ifdef SHIFT_COUNT_TRUNCATED
4200 if (SHIFT_COUNT_TRUNCATED)
4201 arg1 %= width;
4202 #endif
4203
4204 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4205 break;
4206
4207 case ASHIFTRT:
4208 if (arg1 < 0)
4209 return 0;
4210
4211 #ifdef SHIFT_COUNT_TRUNCATED
4212 if (SHIFT_COUNT_TRUNCATED)
4213 arg1 %= width;
4214 #endif
4215
4216 val = arg0s >> arg1;
4217
4218 /* The bootstrap compiler may not have sign-extended the right shift.
4219 Manually extend the sign to ensure bootstrap cc matches gcc. */
4220 if (arg0s < 0 && arg1 > 0)
4221 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4222
4223 break;
4224
4225 case ROTATERT:
4226 if (arg1 < 0)
4227 return 0;
4228
4229 arg1 %= width;
4230 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4231 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4232 break;
4233
4234 case ROTATE:
4235 if (arg1 < 0)
4236 return 0;
4237
4238 arg1 %= width;
4239 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4240 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4241 break;
4242
4243 case COMPARE:
4244 /* Do nothing here. */
4245 return 0;
4246
4247 case SMIN:
4248 val = arg0s <= arg1s ? arg0s : arg1s;
4249 break;
4250
4251 case UMIN:
4252 val = ((unsigned HOST_WIDE_INT) arg0
4253 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4254 break;
4255
4256 case SMAX:
4257 val = arg0s > arg1s ? arg0s : arg1s;
4258 break;
4259
4260 case UMAX:
4261 val = ((unsigned HOST_WIDE_INT) arg0
4262 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4263 break;
4264
4265 default:
4266 abort ();
4267 }
4268
4269 /* Clear the bits that don't belong in our mode, unless they and our sign
4270 bit are all one. So we get either a reasonable negative value or a
4271 reasonable unsigned value for this mode. */
4272 if (width < HOST_BITS_PER_WIDE_INT
4273 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4274 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4275 val &= ((HOST_WIDE_INT) 1 << width) - 1;
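/* E.g. for QImode this masks a raw result of 0x135 down to 0x35,
   but leaves -1 (all bits set) alone, since its discarded bits all
   match the sign bit. */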
4276
4277 /* If this would be an entire word for the target, but is not for
4278 the host, then sign-extend on the host so that the number will look
4279 the same way on the host that it would on the target.
4280
4281 For example, when building a 64 bit alpha hosted 32 bit sparc
4282 targeted compiler, we want the 32 bit unsigned value -1 to be
4283 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
4284 The latter confuses the sparc backend. */
4285
4286 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
4287 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
4288 val |= ((HOST_WIDE_INT) (-1) << width);
4289
4290 return GEN_INT (val);
4291 }
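/* A minimal usage sketch, kept under #if 0 since it is purely
   illustrative and never compiled; the function name is
   hypothetical. */
#if 0
static rtx
example_fold_plus ()
{
  /* Folds to (const_int 13) via the CONST_INT arithmetic above. */
  return simplify_binary_operation (PLUS, SImode,
                                    GEN_INT (6), GEN_INT (7));
}
#endif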
4292 \f
4293 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4294 PLUS or MINUS.
4295
4296 Rather than test for specific cases, we do this by a brute-force method
4297 and do all possible simplifications until no more changes occur. Then
4298 we rebuild the operation. */
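/* For example, (plus:SI (minus:SI a b) (minus:SI b (const_int 4)))
   expands to the operand list {a, -b, b, -4}; the b and -b terms
   cancel and the result is rebuilt as (plus:SI a (const_int -4)). */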
4299
4300 static rtx
4301 simplify_plus_minus (code, mode, op0, op1)
4302 enum rtx_code code;
4303 enum machine_mode mode;
4304 rtx op0, op1;
4305 {
4306 rtx ops[8];
4307 int negs[8];
4308 rtx result, tem;
4309 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4310 int first = 1, negate = 0, changed;
4311 int i, j;
4312
4313 bzero ((char *) ops, sizeof ops);
4314
4315 /* Set up the two operands and then expand them until nothing has been
4316 changed. If we run out of room in our array, give up; this should
4317 almost never happen. */
4318
4319 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4320
4321 changed = 1;
4322 while (changed)
4323 {
4324 changed = 0;
4325
4326 for (i = 0; i < n_ops; i++)
4327 switch (GET_CODE (ops[i]))
4328 {
4329 case PLUS:
4330 case MINUS:
4331 if (n_ops == 7)
4332 return 0;
4333
4334 ops[n_ops] = XEXP (ops[i], 1);
4335 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4336 ops[i] = XEXP (ops[i], 0);
4337 input_ops++;
4338 changed = 1;
4339 break;
4340
4341 case NEG:
4342 ops[i] = XEXP (ops[i], 0);
4343 negs[i] = ! negs[i];
4344 changed = 1;
4345 break;
4346
4347 case CONST:
4348 ops[i] = XEXP (ops[i], 0);
4349 input_consts++;
4350 changed = 1;
4351 break;
4352
4353 case NOT:
4354 /* ~a -> (-a - 1) */
4355 if (n_ops != 7)
4356 {
4357 ops[n_ops] = constm1_rtx;
4358 negs[n_ops++] = negs[i];
4359 ops[i] = XEXP (ops[i], 0);
4360 negs[i] = ! negs[i];
4361 changed = 1;
4362 }
4363 break;
4364
4365 case CONST_INT:
4366 if (negs[i])
4367 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4368 break;
4369 }
4370 }
4371
4372 /* If we only have two operands, we can't do anything. */
4373 if (n_ops <= 2)
4374 return 0;
4375
4376 /* Now simplify each pair of operands until nothing changes. The first
4377 time through just simplify constants against each other. */
4378
4379 changed = 1;
4380 while (changed)
4381 {
4382 changed = first;
4383
4384 for (i = 0; i < n_ops - 1; i++)
4385 for (j = i + 1; j < n_ops; j++)
4386 if (ops[i] != 0 && ops[j] != 0
4387 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4388 {
4389 rtx lhs = ops[i], rhs = ops[j];
4390 enum rtx_code ncode = PLUS;
4391
4392 if (negs[i] && ! negs[j])
4393 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4394 else if (! negs[i] && negs[j])
4395 ncode = MINUS;
4396
4397 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4398 if (tem)
4399 {
4400 ops[i] = tem, ops[j] = 0;
4401 negs[i] = negs[i] && negs[j];
4402 if (GET_CODE (tem) == NEG)
4403 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4404
4405 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4406 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4407 changed = 1;
4408 }
4409 }
4410
4411 first = 0;
4412 }
4413
4414 /* Pack all the operands to the lower-numbered entries and give up if
4415 we didn't reduce the number of operands we had. Make sure we
4416 count a CONST as two operands. If we have the same number of
4417 operands, but have made more CONSTs than we had, this is also
4418 an improvement, so accept it. */
4419
4420 for (i = 0, j = 0; j < n_ops; j++)
4421 if (ops[j] != 0)
4422 {
4423 ops[i] = ops[j], negs[i++] = negs[j];
4424 if (GET_CODE (ops[j]) == CONST)
4425 n_consts++;
4426 }
4427
4428 if (i + n_consts > input_ops
4429 || (i + n_consts == input_ops && n_consts <= input_consts))
4430 return 0;
4431
4432 n_ops = i;
4433
4434 /* If we have a CONST_INT, put it last. */
4435 for (i = 0; i < n_ops - 1; i++)
4436 if (GET_CODE (ops[i]) == CONST_INT)
4437 {
4438 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
4439 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4440 }
4441
4442 /* Put a non-negated operand first. If there aren't any, make all
4443 operands positive and negate the whole thing later. */
4444 for (i = 0; i < n_ops && negs[i]; i++)
4445 ;
4446
4447 if (i == n_ops)
4448 {
4449 for (i = 0; i < n_ops; i++)
4450 negs[i] = 0;
4451 negate = 1;
4452 }
4453 else if (i != 0)
4454 {
4455 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4456 j = negs[0], negs[0] = negs[i], negs[i] = j;
4457 }
4458
4459 /* Now make the result by performing the requested operations. */
4460 result = ops[0];
4461 for (i = 1; i < n_ops; i++)
4462 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4463
4464 return negate ? gen_rtx (NEG, mode, result) : result;
4465 }
4466 \f
4467 /* Make a binary operation by properly ordering the operands and
4468 seeing if the expression folds. */
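/* E.g. given PLUS with op0 == (const_int 4) and op1 a register, the
   operands are swapped so the constant comes second, and
   plus_constant then builds (plus r (const_int 4)). */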
4469
4470 static rtx
4471 cse_gen_binary (code, mode, op0, op1)
4472 enum rtx_code code;
4473 enum machine_mode mode;
4474 rtx op0, op1;
4475 {
4476 rtx tem;
4477
4478 /* Put complex operands first and constants second if commutative. */
4479 if (GET_RTX_CLASS (code) == 'c'
4480 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4481 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4482 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4483 || (GET_CODE (op0) == SUBREG
4484 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4485 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4486 tem = op0, op0 = op1, op1 = tem;
4487
4488 /* If this simplifies, do it. */
4489 tem = simplify_binary_operation (code, mode, op0, op1);
4490
4491 if (tem)
4492 return tem;
4493
4494 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4495 just form the operation. */
4496
4497 if (code == PLUS && GET_CODE (op1) == CONST_INT
4498 && GET_MODE (op0) != VOIDmode)
4499 return plus_constant (op0, INTVAL (op1));
4500 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4501 && GET_MODE (op0) != VOIDmode)
4502 return plus_constant (op0, - INTVAL (op1));
4503 else
4504 return gen_rtx (code, mode, op0, op1);
4505 }
4506 \f
4507 /* Like simplify_binary_operation except used for relational operators.
4508 MODE is the mode of the operands, not that of the result. If MODE
4509 is VOIDmode, both operands must also be VOIDmode and we compare the
4510 operands in "infinite precision".
4511
4512 If no simplification is possible, this function returns zero. Otherwise,
4513 it returns either const_true_rtx or const0_rtx. */
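/* For instance, (lt (const_int -2) (const_int 0)) compared in
   VOIDmode, i.e. in "infinite precision", yields const_true_rtx. */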
4514
4515 rtx
4516 simplify_relational_operation (code, mode, op0, op1)
4517 enum rtx_code code;
4518 enum machine_mode mode;
4519 rtx op0, op1;
4520 {
4521 int equal, op0lt, op0ltu, op1lt, op1ltu;
4522 rtx tem;
4523
4524 /* If op0 is a compare, extract the comparison arguments from it. */
4525 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4526 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4527
4528 /* We can't simplify MODE_CC values since we don't know what the
4529 actual comparison is. */
4530 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4531 #ifdef HAVE_cc0
4532 || op0 == cc0_rtx
4533 #endif
4534 )
4535 return 0;
4536
4537 /* For integer comparisons of A and B maybe we can simplify A - B and can
4538 then simplify a comparison of that with zero. If A and B are both either
4539 a register or a CONST_INT, this can't help; testing for these cases will
4540 prevent infinite recursion here and speed things up.
4541
4542 If CODE is an unsigned comparison, then we can never do this optimization,
4543 because it gives an incorrect result if the subtraction wraps around zero.
4544 ANSI C defines unsigned operations such that they never overflow, and
4545 thus such cases cannot be ignored. */
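/* E.g. (lt:SI (plus:SI r (const_int 1)) (plus:SI r (const_int 3)))
   first folds the subtraction to (const_int -2) and then compares
   that with zero, giving const_true_rtx. */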
4546
4547 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4548 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4549 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4550 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4551 && code != GTU && code != GEU && code != LTU && code != LEU)
4552 return simplify_relational_operation (signed_condition (code),
4553 mode, tem, const0_rtx);
4554
4555 /* For non-IEEE floating-point, if the two operands are equal, we know the
4556 result. */
4557 if (rtx_equal_p (op0, op1)
4558 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4559 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4560 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4561
4562 /* If the operands are floating-point constants, see if we can fold
4563 the result. */
4564 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4565 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4566 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4567 {
4568 REAL_VALUE_TYPE d0, d1;
4569 jmp_buf handler;
4570
4571 if (setjmp (handler))
4572 return 0;
4573
4574 set_float_handler (handler);
4575 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4576 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4577 equal = REAL_VALUES_EQUAL (d0, d1);
4578 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4579 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4580 set_float_handler (NULL_PTR);
4581 }
4582 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4583
4584 /* Otherwise, see if the operands are both integers. */
4585 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4586 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4587 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4588 {
4589 int width = GET_MODE_BITSIZE (mode);
4590 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4591 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4592
4593 /* Get the two words comprising each integer constant. */
4594 if (GET_CODE (op0) == CONST_DOUBLE)
4595 {
4596 l0u = l0s = CONST_DOUBLE_LOW (op0);
4597 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4598 }
4599 else
4600 {
4601 l0u = l0s = INTVAL (op0);
4602 h0u = 0, h0s = l0s < 0 ? -1 : 0;
4603 }
4604
4605 if (GET_CODE (op1) == CONST_DOUBLE)
4606 {
4607 l1u = l1s = CONST_DOUBLE_LOW (op1);
4608 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4609 }
4610 else
4611 {
4612 l1u = l1s = INTVAL (op1);
4613 h1u = 0, h1s = l1s < 0 ? -1 : 0;
4614 }
4615
4616 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4617 we have to sign or zero-extend the values. */
4618 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4619 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4620
4621 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4622 {
4623 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4624 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4625
4626 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4627 l0s |= ((HOST_WIDE_INT) (-1) << width);
4628
4629 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4630 l1s |= ((HOST_WIDE_INT) (-1) << width);
4631 }
4632
4633 equal = (h0u == h1u && l0u == l1u);
4634 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4635 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4636 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4637 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4638 }
4639
4640 /* Otherwise, there are some code-specific tests we can make. */
4641 else
4642 {
4643 switch (code)
4644 {
4645 case EQ:
4646 /* References to the frame plus a constant or labels cannot
4647 be zero, but a SYMBOL_REF can due to #pragma weak. */
4648 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4649 || GET_CODE (op0) == LABEL_REF)
4650 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4651 /* On some machines, the ap reg can be 0 sometimes. */
4652 && op0 != arg_pointer_rtx
4653 #endif
4654 )
4655 return const0_rtx;
4656 break;
4657
4658 case NE:
4659 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4660 || GET_CODE (op0) == LABEL_REF)
4661 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4662 && op0 != arg_pointer_rtx
4663 #endif
4664 )
4665 return const_true_rtx;
4666 break;
4667
4668 case GEU:
4669 /* Unsigned values are never negative. */
4670 if (op1 == const0_rtx)
4671 return const_true_rtx;
4672 break;
4673
4674 case LTU:
4675 if (op1 == const0_rtx)
4676 return const0_rtx;
4677 break;
4678
4679 case LEU:
4680 /* Unsigned values are never greater than the largest
4681 unsigned value. */
4682 if (GET_CODE (op1) == CONST_INT
4683 && INTVAL (op1) == GET_MODE_MASK (mode)
4684 && INTEGRAL_MODE_P (mode))
4685 return const_true_rtx;
4686 break;
4687
4688 case GTU:
4689 if (GET_CODE (op1) == CONST_INT
4690 && INTVAL (op1) == GET_MODE_MASK (mode)
4691 && INTEGRAL_MODE_P (mode))
4692 return const0_rtx;
4693 break;
4694 }
4695
4696 return 0;
4697 }
4698
4699 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4700 as appropriate. */
4701 switch (code)
4702 {
4703 case EQ:
4704 return equal ? const_true_rtx : const0_rtx;
4705 case NE:
4706 return ! equal ? const_true_rtx : const0_rtx;
4707 case LT:
4708 return op0lt ? const_true_rtx : const0_rtx;
4709 case GT:
4710 return op1lt ? const_true_rtx : const0_rtx;
4711 case LTU:
4712 return op0ltu ? const_true_rtx : const0_rtx;
4713 case GTU:
4714 return op1ltu ? const_true_rtx : const0_rtx;
4715 case LE:
4716 return equal || op0lt ? const_true_rtx : const0_rtx;
4717 case GE:
4718 return equal || op1lt ? const_true_rtx : const0_rtx;
4719 case LEU:
4720 return equal || op0ltu ? const_true_rtx : const0_rtx;
4721 case GEU:
4722 return equal || op1ltu ? const_true_rtx : const0_rtx;
4723 }
4724
4725 abort ();
4726 }
4727 \f
4728 /* Simplify CODE, an operation with result mode MODE and three operands,
4729 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4730 a constant. Return 0 if no simplification is possible. */
4731
4732 rtx
4733 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4734 enum rtx_code code;
4735 enum machine_mode mode, op0_mode;
4736 rtx op0, op1, op2;
4737 {
4738 int width = GET_MODE_BITSIZE (mode);
4739
4740 /* VOIDmode means "infinite" precision. */
4741 if (width == 0)
4742 width = HOST_BITS_PER_WIDE_INT;
4743
4744 switch (code)
4745 {
4746 case SIGN_EXTRACT:
4747 case ZERO_EXTRACT:
4748 if (GET_CODE (op0) == CONST_INT
4749 && GET_CODE (op1) == CONST_INT
4750 && GET_CODE (op2) == CONST_INT
4751 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4752 && width <= HOST_BITS_PER_WIDE_INT)
4753 {
4754 /* Extracting a bit-field from a constant */
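/* E.g. (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   extracts the high nibble, 0xa; SIGN_EXTRACT would sign-extend
   it to -6. (This assumes little-endian bit numbering, i.e.
   ! BITS_BIG_ENDIAN.) */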
4755 HOST_WIDE_INT val = INTVAL (op0);
4756
4757 if (BITS_BIG_ENDIAN)
4758 val >>= (GET_MODE_BITSIZE (op0_mode)
4759 - INTVAL (op2) - INTVAL (op1));
4760 else
4761 val >>= INTVAL (op2);
4762
4763 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4764 {
4765 /* First zero-extend. */
4766 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4767 /* If desired, propagate sign bit. */
4768 if (code == SIGN_EXTRACT
4769 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4770 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4771 }
4772
4773 /* Clear the bits that don't belong in our mode,
4774 unless they and our sign bit are all one.
4775 So we get either a reasonable negative value or a reasonable
4776 unsigned value for this mode. */
4777 if (width < HOST_BITS_PER_WIDE_INT
4778 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4779 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4780 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4781
4782 return GEN_INT (val);
4783 }
4784 break;
4785
4786 case IF_THEN_ELSE:
4787 if (GET_CODE (op0) == CONST_INT)
4788 return op0 != const0_rtx ? op1 : op2;
4789 break;
4790
4791 default:
4792 abort ();
4793 }
4794
4795 return 0;
4796 }
4797 \f
4798 /* If X is a nontrivial arithmetic operation on an argument
4799 for which a constant value can be determined, return
4800 the result of operating on that value, as a constant.
4801 Otherwise, return X, possibly with one or more operands
4802 modified by recursive calls to this function.
4803
4804 If X is a register whose contents are known, we do NOT
4805 return those contents here. equiv_constant is called to
4806 perform that task.
4807
4808 INSN is the insn that we may be modifying. If it is 0, make a copy
4809 of X before modifying it. */
4810
4811 static rtx
4812 fold_rtx (x, insn)
4813 rtx x;
4814 rtx insn;
4815 {
4816 register enum rtx_code code;
4817 register enum machine_mode mode;
4818 register char *fmt;
4819 register int i;
4820 rtx new = 0;
4821 int copied = 0;
4822 int must_swap = 0;
4823
4824 /* Folded equivalents of first two operands of X. */
4825 rtx folded_arg0;
4826 rtx folded_arg1;
4827
4828 /* Constant equivalents of first three operands of X;
4829 0 when no such equivalent is known. */
4830 rtx const_arg0;
4831 rtx const_arg1;
4832 rtx const_arg2;
4833
4834 /* The mode of the first operand of X. We need this for sign and zero
4835 extends. */
4836 enum machine_mode mode_arg0;
4837
4838 if (x == 0)
4839 return x;
4840
4841 mode = GET_MODE (x);
4842 code = GET_CODE (x);
4843 switch (code)
4844 {
4845 case CONST:
4846 case CONST_INT:
4847 case CONST_DOUBLE:
4848 case SYMBOL_REF:
4849 case LABEL_REF:
4850 case REG:
4851 /* No use simplifying an EXPR_LIST
4852 since they are used only for lists of args
4853 in a function call's REG_EQUAL note. */
4854 case EXPR_LIST:
4855 return x;
4856
4857 #ifdef HAVE_cc0
4858 case CC0:
4859 return prev_insn_cc0;
4860 #endif
4861
4862 case PC:
4863 /* If the next insn is a CODE_LABEL followed by a jump table,
4864 PC's value is a LABEL_REF pointing to that label. That
4865 lets us fold switch statements on the VAX. */
4866 if (insn && GET_CODE (insn) == JUMP_INSN)
4867 {
4868 rtx next = next_nonnote_insn (insn);
4869
4870 if (next && GET_CODE (next) == CODE_LABEL
4871 && NEXT_INSN (next) != 0
4872 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4873 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4874 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4875 return gen_rtx (LABEL_REF, Pmode, next);
4876 }
4877 break;
4878
4879 case SUBREG:
4880 /* See if we previously assigned a constant value to this SUBREG. */
4881 if ((new = lookup_as_function (x, CONST_INT)) != 0
4882 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4883 return new;
4884
4885 /* If this is a paradoxical SUBREG, we have no idea what value the
4886 extra bits would have. However, if the operand is equivalent
4887 to a SUBREG whose operand has the same mode as ours, and all the
4888 modes are within a word, we can just use the inner operand
4889 because these SUBREGs just say how to treat the register.
4890
4891 Similarly if we find an integer constant. */
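/* E.g. on a 32 bit target, for the paradoxical
   (subreg:SI (reg:QI r) 0): if r is known equal to (const_int 5),
   or to (subreg:QI (reg:SI s) 0), we can return the constant or s
   itself; the register names here are illustrative. */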
4892
4893 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4894 {
4895 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4896 struct table_elt *elt;
4897
4898 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4899 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4900 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4901 imode)) != 0)
4902 for (elt = elt->first_same_value;
4903 elt; elt = elt->next_same_value)
4904 {
4905 if (CONSTANT_P (elt->exp)
4906 && GET_MODE (elt->exp) == VOIDmode)
4907 return elt->exp;
4908
4909 if (GET_CODE (elt->exp) == SUBREG
4910 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4911 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4912 return copy_rtx (SUBREG_REG (elt->exp));
4913 }
4914
4915 return x;
4916 }
4917
4918 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4919 We might be able to if the SUBREG is extracting a single word in an
4920 integral mode or extracting the low part. */
4921
4922 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4923 const_arg0 = equiv_constant (folded_arg0);
4924 if (const_arg0)
4925 folded_arg0 = const_arg0;
4926
4927 if (folded_arg0 != SUBREG_REG (x))
4928 {
4929 new = 0;
4930
4931 if (GET_MODE_CLASS (mode) == MODE_INT
4932 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4933 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4934 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4935 GET_MODE (SUBREG_REG (x)));
4936 if (new == 0 && subreg_lowpart_p (x))
4937 new = gen_lowpart_if_possible (mode, folded_arg0);
4938 if (new)
4939 return new;
4940 }
4941
4942 /* If this is a narrowing SUBREG and our operand is a REG, see if
4943 we can find an equivalence for REG that is an arithmetic operation
4944 in a wider mode where both operands are paradoxical SUBREGs
4945 from objects of our result mode. In that case, we couldn't report
4946 an equivalent value for that operation, since we don't know what the
4947 extra bits will be. But we can find an equivalence for this SUBREG
4948 by folding that operation in the narrow mode. This allows us to
4949 fold arithmetic in narrow modes when the machine only supports
4950 word-sized arithmetic.
4951
4952 Also look for a case where we have a SUBREG whose operand is the
4953 same as our result. If both modes are smaller than a word, we
4954 are simply interpreting a register in different modes and we
4955 can use the inner value. */
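/* Sketch of the first case: for (subreg:QI (reg:SI r) 0), if r is
   recorded as equivalent to
   (plus:SI (subreg:SI (reg:QI a) 0) (subreg:SI (reg:QI b) 0)),
   the addition can be folded directly in QImode as (plus:QI a b),
   provided constant equivalents for a and b are known. */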
4956
4957 if (GET_CODE (folded_arg0) == REG
4958 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4959 && subreg_lowpart_p (x))
4960 {
4961 struct table_elt *elt;
4962
4963 /* We can use HASH here since we know that canon_hash won't be
4964 called. */
4965 elt = lookup (folded_arg0,
4966 HASH (folded_arg0, GET_MODE (folded_arg0)),
4967 GET_MODE (folded_arg0));
4968
4969 if (elt)
4970 elt = elt->first_same_value;
4971
4972 for (; elt; elt = elt->next_same_value)
4973 {
4974 enum rtx_code eltcode = GET_CODE (elt->exp);
4975
4976 /* Just check for unary and binary operations. */
4977 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4978 && GET_CODE (elt->exp) != SIGN_EXTEND
4979 && GET_CODE (elt->exp) != ZERO_EXTEND
4980 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4981 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4982 {
4983 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4984
4985 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4986 op0 = fold_rtx (op0, NULL_RTX);
4987
4988 op0 = equiv_constant (op0);
4989 if (op0)
4990 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4991 op0, mode);
4992 }
4993 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4994 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4995 && eltcode != DIV && eltcode != MOD
4996 && eltcode != UDIV && eltcode != UMOD
4997 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4998 && eltcode != ROTATE && eltcode != ROTATERT
4999 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
5000 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
5001 == mode))
5002 || CONSTANT_P (XEXP (elt->exp, 0)))
5003 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
5004 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
5005 == mode))
5006 || CONSTANT_P (XEXP (elt->exp, 1))))
5007 {
5008 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
5009 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
5010
5011 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
5012 op0 = fold_rtx (op0, NULL_RTX);
5013
5014 if (op0)
5015 op0 = equiv_constant (op0);
5016
5017 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
5018 op1 = fold_rtx (op1, NULL_RTX);
5019
5020 if (op1)
5021 op1 = equiv_constant (op1);
5022
5023 /* If we are looking for the low SImode part of
5024 (ashift:DI c (const_int 32)), it doesn't work
5025 to compute that in SImode, because a 32-bit shift
5026 in SImode is unpredictable. We know the value is 0. */
5027 if (op0 && op1
5028 && GET_CODE (elt->exp) == ASHIFT
5029 && GET_CODE (op1) == CONST_INT
5030 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
5031 {
5032 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
5033
5034 /* If the count fits in the inner mode's width,
5035 but exceeds the outer mode's width,
5036 the value will get truncated to 0
5037 by the subreg. */
5038 new = const0_rtx;
5039 else
5040 /* If the count exceeds even the inner mode's width,
5041 don't fold this expression. */
5042 new = 0;
5043 }
5044 else if (op0 && op1)
5045 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
5046 op0, op1);
5047 }
5048
5049 else if (GET_CODE (elt->exp) == SUBREG
5050 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5051 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
5052 <= UNITS_PER_WORD)
5053 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5054 new = copy_rtx (SUBREG_REG (elt->exp));
5055
5056 if (new)
5057 return new;
5058 }
5059 }
5060
5061 return x;
5062
5063 case NOT:
5064 case NEG:
5065 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5066 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5067 new = lookup_as_function (XEXP (x, 0), code);
5068 if (new)
5069 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5070 break;
5071
5072 case MEM:
5073 /* If we are not actually processing an insn, don't try to find the
5074 best address. Not only don't we care, but we could modify the
5075 MEM in an invalid way since we have no insn to validate against. */
5076 if (insn != 0)
5077 find_best_addr (insn, &XEXP (x, 0));
5078
5079 {
5080 /* Even if we don't fold in the insn itself,
5081 we can safely do so here, in hopes of getting a constant. */
5082 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5083 rtx base = 0;
5084 HOST_WIDE_INT offset = 0;
5085
5086 if (GET_CODE (addr) == REG
5087 && REGNO_QTY_VALID_P (REGNO (addr))
5088 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
5089 && qty_const[reg_qty[REGNO (addr)]] != 0)
5090 addr = qty_const[reg_qty[REGNO (addr)]];
5091
5092 /* If address is constant, split it into a base and integer offset. */
5093 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5094 base = addr;
5095 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5096 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5097 {
5098 base = XEXP (XEXP (addr, 0), 0);
5099 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5100 }
5101 else if (GET_CODE (addr) == LO_SUM
5102 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5103 base = XEXP (addr, 1);
5104
5105 /* If this is a constant pool reference, we can fold it into its
5106 constant to allow better value tracking. */
5107 if (base && GET_CODE (base) == SYMBOL_REF
5108 && CONSTANT_POOL_ADDRESS_P (base))
5109 {
5110 rtx constant = get_pool_constant (base);
5111 enum machine_mode const_mode = get_pool_mode (base);
5112 rtx new;
5113
5114 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5115 constant_pool_entries_cost = COST (constant);
5116
5117 /* If we are loading the full constant, we have an equivalence. */
5118 if (offset == 0 && mode == const_mode)
5119 return constant;
5120
5121 /* If this actually isn't a constant (weird!), we can't do
5122 anything. Otherwise, handle the two most common cases:
5123 extracting a word from a multi-word constant, and extracting
5124 the low-order bits. Other cases don't seem common enough to
5125 worry about. */
5126 if (! CONSTANT_P (constant))
5127 return x;
5128
5129 if (GET_MODE_CLASS (mode) == MODE_INT
5130 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5131 && offset % UNITS_PER_WORD == 0
5132 && (new = operand_subword (constant,
5133 offset / UNITS_PER_WORD,
5134 0, const_mode)) != 0)
5135 return new;
5136
5137 if (((BYTES_BIG_ENDIAN
5138 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5139 || (! BYTES_BIG_ENDIAN && offset == 0))
5140 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5141 return new;
5142 }
5143
5144 /* If this is a reference to a label at a known position in a jump
5145 table, we also know its value. */
5146 if (base && GET_CODE (base) == LABEL_REF)
5147 {
5148 rtx label = XEXP (base, 0);
5149 rtx table_insn = NEXT_INSN (label);
5150
5151 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5152 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5153 {
5154 rtx table = PATTERN (table_insn);
5155
5156 if (offset >= 0
5157 && (offset / GET_MODE_SIZE (GET_MODE (table))
5158 < XVECLEN (table, 0)))
5159 return XVECEXP (table, 0,
5160 offset / GET_MODE_SIZE (GET_MODE (table)));
5161 }
5162 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5163 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5164 {
5165 rtx table = PATTERN (table_insn);
5166
5167 if (offset >= 0
5168 && (offset / GET_MODE_SIZE (GET_MODE (table))
5169 < XVECLEN (table, 1)))
5170 {
5171 offset /= GET_MODE_SIZE (GET_MODE (table));
5172 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5173 XEXP (table, 0));
5174
5175 if (GET_MODE (table) != Pmode)
5176 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5177
5178 /* Indicate this is a constant. This isn't a
5179 valid form of CONST, but it will only be used
5180 to fold the next insns and then discarded, so
5181 it should be safe. */
5182 return gen_rtx (CONST, GET_MODE (new), new);
5183 }
5184 }
5185 }
5186
5187 return x;
5188 }
5189
5190 case ASM_OPERANDS:
5191 for (i = XVECLEN (x, 3) - 1; i >= 0; i--)
5192 validate_change (insn, &XVECEXP (x, 3, i),
5193 fold_rtx (XVECEXP (x, 3, i), insn), 0);
5194 break;
5195 }
5196
5197 const_arg0 = 0;
5198 const_arg1 = 0;
5199 const_arg2 = 0;
5200 mode_arg0 = VOIDmode;
5201
5202 /* Try folding our operands.
5203 Then see which ones have constant values known. */
5204
5205 fmt = GET_RTX_FORMAT (code);
5206 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5207 if (fmt[i] == 'e')
5208 {
5209 rtx arg = XEXP (x, i);
5210 rtx folded_arg = arg, const_arg = 0;
5211 enum machine_mode mode_arg = GET_MODE (arg);
5212 rtx cheap_arg, expensive_arg;
5213 rtx replacements[2];
5214 int j;
5215
5216 /* Most arguments are cheap, so handle them specially. */
5217 switch (GET_CODE (arg))
5218 {
5219 case REG:
5220 /* This is the same as calling equiv_constant; it is duplicated
5221 here for speed. */
5222 if (REGNO_QTY_VALID_P (REGNO (arg))
5223 && qty_const[reg_qty[REGNO (arg)]] != 0
5224 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5225 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5226 const_arg
5227 = gen_lowpart_if_possible (GET_MODE (arg),
5228 qty_const[reg_qty[REGNO (arg)]]);
5229 break;
5230
5231 case CONST:
5232 case CONST_INT:
5233 case SYMBOL_REF:
5234 case LABEL_REF:
5235 case CONST_DOUBLE:
5236 const_arg = arg;
5237 break;
5238
5239 #ifdef HAVE_cc0
5240 case CC0:
5241 folded_arg = prev_insn_cc0;
5242 mode_arg = prev_insn_cc0_mode;
5243 const_arg = equiv_constant (folded_arg);
5244 break;
5245 #endif
5246
5247 default:
5248 folded_arg = fold_rtx (arg, insn);
5249 const_arg = equiv_constant (folded_arg);
5250 }
5251
5252 /* For the first three operands, see if the operand
5253 is constant or equivalent to a constant. */
5254 switch (i)
5255 {
5256 case 0:
5257 folded_arg0 = folded_arg;
5258 const_arg0 = const_arg;
5259 mode_arg0 = mode_arg;
5260 break;
5261 case 1:
5262 folded_arg1 = folded_arg;
5263 const_arg1 = const_arg;
5264 break;
5265 case 2:
5266 const_arg2 = const_arg;
5267 break;
5268 }
5269
5270 /* Pick the least expensive of the folded argument and an
5271 equivalent constant argument. */
5272 if (const_arg == 0 || const_arg == folded_arg
5273 || COST (const_arg) > COST (folded_arg))
5274 cheap_arg = folded_arg, expensive_arg = const_arg;
5275 else
5276 cheap_arg = const_arg, expensive_arg = folded_arg;
5277
5278 /* Try to replace the operand with the cheapest of the two
5279 possibilities. If it doesn't work and this is either of the first
5280 two operands of a commutative operation, try swapping them.
5281 If THAT fails, try the more expensive, provided it is cheaper
5282 than what is already there. */
5283
5284 if (cheap_arg == XEXP (x, i))
5285 continue;
5286
5287 if (insn == 0 && ! copied)
5288 {
5289 x = copy_rtx (x);
5290 copied = 1;
5291 }
5292
5293 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5294 for (j = 0;
5295 j < 2 && replacements[j]
5296 && COST (replacements[j]) < COST (XEXP (x, i));
5297 j++)
5298 {
5299 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5300 break;
5301
5302 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5303 {
5304 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5305 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5306
5307 if (apply_change_group ())
5308 {
5309 /* Swap them back to be invalid so that this loop can
5310 continue and flag them to be swapped back later. */
5311 rtx tem;
5312
5313 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5314 XEXP (x, 1) = tem;
5315 must_swap = 1;
5316 break;
5317 }
5318 }
5319 }
5320 }
5321
5322 else if (fmt[i] == 'E')
5323 /* Don't try to fold inside a vector of expressions.
5324 Doing nothing is harmless. */
5325 ;
5326
5327 /* If a commutative operation, place a constant integer as the second
5328 operand unless the first operand is also a constant integer. Otherwise,
5329 place any constant second unless the first operand is also a constant. */
5330
5331 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5332 {
5333 if (must_swap || (const_arg0
5334 && (const_arg1 == 0
5335 || (GET_CODE (const_arg0) == CONST_INT
5336 && GET_CODE (const_arg1) != CONST_INT))))
5337 {
5338 register rtx tem = XEXP (x, 0);
5339
5340 if (insn == 0 && ! copied)
5341 {
5342 x = copy_rtx (x);
5343 copied = 1;
5344 }
5345
5346 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5347 validate_change (insn, &XEXP (x, 1), tem, 1);
5348 if (apply_change_group ())
5349 {
5350 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5351 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5352 }
5353 }
5354 }
5355
5356 /* If X is an arithmetic operation, see if we can simplify it. */
5357
5358 switch (GET_RTX_CLASS (code))
5359 {
5360 case '1':
5361 {
5362 int is_const = 0;
5363
5364 /* We can't simplify extension ops unless we know the
5365 original mode. */
5366 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5367 && mode_arg0 == VOIDmode)
5368 break;
5369
5370 /* If we had a CONST, strip it off and put it back later if we
5371 fold. */
5372 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5373 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5374
5375 new = simplify_unary_operation (code, mode,
5376 const_arg0 ? const_arg0 : folded_arg0,
5377 mode_arg0);
5378 if (new != 0 && is_const)
5379 new = gen_rtx (CONST, mode, new);
5380 }
5381 break;
5382
5383 case '<':
5384 /* See what items are actually being compared and set FOLDED_ARG[01]
5385 to those values and CODE to the actual comparison code. If any are
5386 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5387 do anything if both operands are already known to be constant. */
5388
5389 if (const_arg0 == 0 || const_arg1 == 0)
5390 {
5391 struct table_elt *p0, *p1;
5392 rtx true = const_true_rtx, false = const0_rtx;
5393 enum machine_mode mode_arg1;
5394
5395 #ifdef FLOAT_STORE_FLAG_VALUE
5396 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5397 {
5398 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5399 mode);
5400 false = CONST0_RTX (mode);
5401 }
5402 #endif
5403
5404 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5405 &mode_arg0, &mode_arg1);
5406 const_arg0 = equiv_constant (folded_arg0);
5407 const_arg1 = equiv_constant (folded_arg1);
5408
5409 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5410 what kinds of things are being compared, so we can't do
5411 anything with this comparison. */
5412
5413 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5414 break;
5415
5416 /* If we do not now have two constants being compared, see
5417 if we can nevertheless deduce some things about the
5418 comparison. */
5419 if (const_arg0 == 0 || const_arg1 == 0)
5420 {
5421 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
5422 non-explicit constant? These aren't zero, but we
5423 don't know their sign. */
5424 if (const_arg1 == const0_rtx
5425 && (NONZERO_BASE_PLUS_P (folded_arg0)
5426 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5427 come out as 0. */
5428 || GET_CODE (folded_arg0) == SYMBOL_REF
5429 #endif
5430 || GET_CODE (folded_arg0) == LABEL_REF
5431 || GET_CODE (folded_arg0) == CONST))
5432 {
5433 if (code == EQ)
5434 return false;
5435 else if (code == NE)
5436 return true;
5437 }
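/* E.g. (eq (plus (reg fp) (const_int 8)) (const_int 0)):
   a frame-pointer-plus-constant address is known non-zero, so
   EQ folds to false and NE to true.  (Offset illustrative.)  */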
5438
5439 /* See if the two operands are the same. We don't do this
5440 for IEEE floating-point, since we can't assume x == x
5441 there: x might be a NaN. */
5442
5443 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5444 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5445 && (folded_arg0 == folded_arg1
5446 || (GET_CODE (folded_arg0) == REG
5447 && GET_CODE (folded_arg1) == REG
5448 && (reg_qty[REGNO (folded_arg0)]
5449 == reg_qty[REGNO (folded_arg1)]))
5450 || ((p0 = lookup (folded_arg0,
5451 (safe_hash (folded_arg0, mode_arg0)
5452 % NBUCKETS), mode_arg0))
5453 && (p1 = lookup (folded_arg1,
5454 (safe_hash (folded_arg1, mode_arg0)
5455 % NBUCKETS), mode_arg0))
5456 && p0->first_same_value == p1->first_same_value)))
5457 return ((code == EQ || code == LE || code == GE
5458 || code == LEU || code == GEU)
5459 ? true : false);
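/* For example (register numbers illustrative), if (reg 100) and
   (reg 101) share a quantity number, (ge (reg 100) (reg 101))
   folds to true and (ne (reg 100) (reg 101)) folds to false.  */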
5460
5461 /* If FOLDED_ARG0 is a register, see if the comparison we are
5462 doing now is either the same as we did before or the reverse
5463 (we only check the reverse if not floating-point). */
5464 else if (GET_CODE (folded_arg0) == REG)
5465 {
5466 int qty = reg_qty[REGNO (folded_arg0)];
5467
5468 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5469 && (comparison_dominates_p (qty_comparison_code[qty], code)
5470 || (comparison_dominates_p (qty_comparison_code[qty],
5471 reverse_condition (code))
5472 && ! FLOAT_MODE_P (mode_arg0)))
5473 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5474 || (const_arg1
5475 && rtx_equal_p (qty_comparison_const[qty],
5476 const_arg1))
5477 || (GET_CODE (folded_arg1) == REG
5478 && (reg_qty[REGNO (folded_arg1)]
5479 == qty_comparison_qty[qty]))))
5480 return (comparison_dominates_p (qty_comparison_code[qty],
5481 code)
5482 ? true : false);
5483 }
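/* Illustration: if the comparison recorded for FOLDED_ARG0's
   quantity is LT against (const_int 5), a later LE against the
   same constant is dominated by it and folds to true, while a
   GE (the reverse condition) folds to false.  The reverse check
   is skipped for floating-point, as noted above.  */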
5484 }
5485 }
5486
5487 /* If we are comparing against zero, see if the first operand is
5488 equivalent to an IOR with a constant. If so, we may be able to
5489 determine the result of this comparison. */
5490
5491 if (const_arg1 == const0_rtx)
5492 {
5493 rtx y = lookup_as_function (folded_arg0, IOR);
5494 rtx inner_const;
5495
5496 if (y != 0
5497 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5498 && GET_CODE (inner_const) == CONST_INT
5499 && INTVAL (inner_const) != 0)
5500 {
5501 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5502 int has_sign = (HOST_BITS_PER_WIDE_INT > sign_bitnum
5503 && (INTVAL (inner_const)
5504 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5505 rtx true = const_true_rtx, false = const0_rtx;
5506
5507 #ifdef FLOAT_STORE_FLAG_VALUE
5508 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5509 {
5510 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5511 mode);
5512 false = CONST0_RTX (mode);
5513 }
5514 #endif
5515
5516 switch (code)
5517 {
5518 case EQ:
5519 return false;
5520 case NE:
5521 return true;
5522 case LT: case LE:
5523 if (has_sign)
5524 return true;
5525 break;
5526 case GT: case GE:
5527 if (has_sign)
5528 return false;
5529 break;
5530 }
5531 }
5532 }
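/* Illustration: if FOLDED_ARG0 is known equivalent to
   (ior:SI x (const_int 4)), bit 2 is always set, so NE against
   zero folds to true and EQ to false; and if the IOR'd constant
   had the sign bit set, LT against zero would fold to true.  */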
5533
5534 new = simplify_relational_operation (code, mode_arg0,
5535 const_arg0 ? const_arg0 : folded_arg0,
5536 const_arg1 ? const_arg1 : folded_arg1);
5537 #ifdef FLOAT_STORE_FLAG_VALUE
5538 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5539 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5540 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5541 #endif
5542 break;
5543
5544 case '2':
5545 case 'c':
5546 switch (code)
5547 {
5548 case PLUS:
5549 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5550 with that LABEL_REF as its second operand. If so, the result is
5551 the first operand of that MINUS. This handles switches with an
5552 ADDR_DIFF_VEC table. */
5553 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5554 {
5555 rtx y
5556 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5557 : lookup_as_function (folded_arg0, MINUS);
5558
5559 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5560 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5561 return XEXP (y, 0);
5562
5563 /* Now try for a CONST of a MINUS like the above. */
5564 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5565 : lookup_as_function (folded_arg0, CONST))) != 0
5566 && GET_CODE (XEXP (y, 0)) == MINUS
5567 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5568 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg1, 0))
5569 return XEXP (XEXP (y, 0), 0);
5570 }
5571
5572 /* Likewise if the operands are in the other order. */
5573 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5574 {
5575 rtx y
5576 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5577 : lookup_as_function (folded_arg1, MINUS);
5578
5579 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5580 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5581 return XEXP (y, 0);
5582
5583 /* Now try for a CONST of a MINUS like the above. */
5584 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5585 : lookup_as_function (folded_arg1, CONST))) != 0
5586 && GET_CODE (XEXP (y, 0)) == MINUS
5587 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5588 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg0, 0))
5589 return XEXP (XEXP (y, 0), 0);
5590 }
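/* Illustration with hypothetical labels L1 and L2: folding
   (plus (minus (label_ref L2) (label_ref L1)) (label_ref L1))
   returns (label_ref L2), the L1 terms cancelling.  */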
5591
5592 /* If second operand is a register equivalent to a negative
5593 CONST_INT, see if we can find a register equivalent to the
5594 positive constant. Make a MINUS if so. Don't do this for
5595 a non-negative constant since we might then alternate between
5596 choosing positive and negative constants. Having the positive
5597 constant previously-used is the more common case. Be sure
5598 the resulting constant is non-negative; if const_arg1 were
5599 the smallest negative number this would overflow: depending
5600 on the mode, this would either just be the same value (and
5601 hence not save anything) or be incorrect. */
5602 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
5603 && INTVAL (const_arg1) < 0
5604 && - INTVAL (const_arg1) >= 0
5605 && GET_CODE (folded_arg1) == REG)
5606 {
5607 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5608 struct table_elt *p
5609 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5610 mode);
5611
5612 if (p)
5613 for (p = p->first_same_value; p; p = p->next_same_value)
5614 if (GET_CODE (p->exp) == REG)
5615 return cse_gen_binary (MINUS, mode, folded_arg0,
5616 canon_reg (p->exp, NULL_RTX));
5617 }
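/* Illustration (registers hypothetical): given
   (plus (reg 100) (const_int -4)), if (reg 101) is known to
   hold (const_int 4), we return (minus (reg 100) (reg 101))
   instead.  */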
5618 goto from_plus;
5619
5620 case MINUS:
5621 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5622 If so, produce (PLUS Z C2-C). */
5623 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5624 {
5625 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5626 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5627 return fold_rtx (plus_constant (copy_rtx (y),
5628 -INTVAL (const_arg1)),
5629 NULL_RTX);
5630 }
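/* Illustration: if XEXP (x, 0) is known to be
   (plus z (const_int 8)), then
   (minus XEXP (x, 0) (const_int 3)) refolds to
   (plus z (const_int 5)).  */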
5631
5632 /* ... fall through ... */
5633
5634 from_plus:
5635 case SMIN: case SMAX: case UMIN: case UMAX:
5636 case IOR: case AND: case XOR:
5637 case MULT: case DIV: case UDIV:
5638 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5639 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5640 is known to be of similar form, we may be able to replace the
5641 operation with a combined operation. This may eliminate the
5642 intermediate operation if every use is simplified in this way.
5643 Note that the similar optimization done by combine.c only works
5644 if the intermediate operation's result has only one reference. */
5645
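/* Illustration (register number hypothetical): if (reg 100) was
   computed as (ashift x (const_int 2)), folding
   (ashift (reg 100) (const_int 3)) composes the counts with PLUS,
   giving (ashift x (const_int 5)); likewise
   (div (div a (const_int 4)) (const_int 2)) composes with MULT
   into (div a (const_int 8)).  */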
5646 if (GET_CODE (folded_arg0) == REG
5647 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5648 {
5649 int is_shift
5650 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5651 rtx y = lookup_as_function (folded_arg0, code);
5652 rtx inner_const;
5653 enum rtx_code associate_code;
5654 rtx new_const;
5655
5656 if (y == 0
5657 || 0 == (inner_const
5658 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5659 || GET_CODE (inner_const) != CONST_INT
5660 /* If we have compiled a statement like
5661 "if (x == (x & mask1))", and now are looking at
5662 "x & mask2", we will have a case where the first operand
5663 of Y is the same as our first operand. Unless we detect
5664 this case, an infinite loop will result. */
5665 || XEXP (y, 0) == folded_arg0)
5666 break;
5667
5668 /* Don't associate these operations if they are a PLUS with the
5669 same constant and it is a power of two. These might be doable
5670 with a pre- or post-increment. Similarly for two subtracts of
5671 identical powers of two with post decrement. */
5672
5673 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5674 && (0
5675 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5676 || exact_log2 (INTVAL (const_arg1)) >= 0
5677 #endif
5678 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5679 || exact_log2 (- INTVAL (const_arg1)) >= 0
5680 #endif
5681 ))
5682 break;
5683
5684 /* Compute the code used to compose the constants. For example,
5685 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5686
5687 associate_code
5688 = (code == MULT || code == DIV || code == UDIV ? MULT
5689 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5690
5691 new_const = simplify_binary_operation (associate_code, mode,
5692 const_arg1, inner_const);
5693
5694 if (new_const == 0)
5695 break;
5696
5697 /* If we are associating shift operations, don't let this
5698 produce a shift of the size of the object or larger.
5699 This could occur when we follow a sign-extend by a right
5700 shift on a machine that does a sign-extend as a pair
5701 of shifts. */
5702
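/* E.g. in SImode two ASHIFTRTs by 24 would compose to a shift
   by 48; since 48 >= 32, we clamp an ASHIFTRT to 31 (every bit
   is then a copy of the sign bit) and give up for other shifts.  */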
5703 if (is_shift && GET_CODE (new_const) == CONST_INT
5704 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5705 {
5706 /* As an exception, we can turn an ASHIFTRT of this
5707 form into a shift of the number of bits - 1. */
5708 if (code == ASHIFTRT)
5709 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5710 else
5711 break;
5712 }
5713
5714 y = copy_rtx (XEXP (y, 0));
5715
5716 /* If Y contains our first operand (the most common way this
5717 can happen is if Y is a MEM), we would go into an infinite
5718 loop if we tried to fold it. So don't in that case. */
5719
5720 if (! reg_mentioned_p (folded_arg0, y))
5721 y = fold_rtx (y, insn);
5722
5723 return cse_gen_binary (code, mode, y, new_const);
5724 }
5725 }
5726
5727 new = simplify_binary_operation (code, mode,
5728 const_arg0 ? const_arg0 : folded_arg0,
5729 const_arg1 ? const_arg1 : folded_arg1);
5730 break;
5731
5732 case 'o':
5733 /* (lo_sum (high X) X) is simply X. */
5734 if (code == LO_SUM && const_arg0 != 0
5735 && GET_CODE (const_arg0) == HIGH
5736 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5737 return const_arg1;
5738 break;
5739
5740 case '3':
5741 case 'b':
5742 new = simplify_ternary_operation (code, mode, mode_arg0,
5743 const_arg0 ? const_arg0 : folded_arg0,
5744 const_arg1 ? const_arg1 : folded_arg1,
5745 const_arg2 ? const_arg2 : XEXP (x, 2));
5746 break;
5747 }
5748
5749 return new ? new : x;
5750 }
5751 \f
5752 /* Return a constant value currently equivalent to X.
5753 Return 0 if we don't know one. */
5754
5755 static rtx
5756 equiv_constant (x)
5757 rtx x;
5758 {
5759 if (GET_CODE (x) == REG
5760 && REGNO_QTY_VALID_P (REGNO (x))
5761 && qty_const[reg_qty[REGNO (x)]])
5762 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5763
5764 if (x != 0 && CONSTANT_P (x))
5765 return x;
5766
5767 /* If X is a MEM, try to fold it outside the context of any insn to see if
5768 it might be equivalent to a constant. That handles the case where it
5769 is a constant-pool reference. Then try to look it up in the hash table
5770 in case it is something whose value we have seen before. */
5771
5772 if (GET_CODE (x) == MEM)
5773 {
5774 struct table_elt *elt;
5775
5776 x = fold_rtx (x, NULL_RTX);
5777 if (CONSTANT_P (x))
5778 return x;
5779
5780 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5781 if (elt == 0)
5782 return 0;
5783
5784 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5785 if (elt->is_const && CONSTANT_P (elt->exp))
5786 return elt->exp;
5787 }
5788
5789 return 0;
5790 }
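/* Illustrative behavior (operands hypothetical): for a pseudo whose
   quantity is known to hold (const_int 10), equiv_constant returns
   (const_int 10), narrowed to the register's mode if necessary; for
   a MEM that folds to a constant-pool reference, it returns the pool
   constant; otherwise it returns 0.  */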
5791 \f
5792 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5793 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5794 least-significant part of X.
5795 MODE specifies how big a part of X to return.
5796
5797 If the requested operation cannot be done, 0 is returned.
5798
5799 This is similar to gen_lowpart in emit-rtl.c. */
5800
5801 rtx
5802 gen_lowpart_if_possible (mode, x)
5803 enum machine_mode mode;
5804 register rtx x;
5805 {
5806 rtx result = gen_lowpart_common (mode, x);
5807
5808 if (result)
5809 return result;
5810 else if (GET_CODE (x) == MEM)
5811 {
5812 /* This is the only other case we handle. */
5813 register int offset = 0;
5814 rtx new;
5815
5816 if (WORDS_BIG_ENDIAN)
5817 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5818 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5819 if (BYTES_BIG_ENDIAN)
5820 /* Adjust the address so that the address-after-the-data is
5821 unchanged. */
5822 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5823 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
5824 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5825 if (! memory_address_p (mode, XEXP (new, 0)))
5826 return 0;
5827 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5828 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5829 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5830 return new;
5831 }
5832 else
5833 return 0;
5834 }
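/* Example of the MEM case above: requesting the QImode lowpart of
   (mem:SI addr) on a machine with BYTES_BIG_ENDIAN set and 4-byte
   words yields (mem:QI (plus addr (const_int 3))), since the
   low-order byte is the last byte of the word; with both endianness
   flags clear the offset is 0.  (addr stands for any valid address.)  */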
5835 \f
5836 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5837 branch. It will be zero if not.
5838
5839 In certain cases, this can cause us to add an equivalence. For example,
5840 if we are following the taken case of
5841 if (i == 2)
5842 we can add the fact that `i' and `2' are now equivalent.
5843
5844 In any case, we can record that this comparison was passed. If the same
5845 comparison is seen later, we will know its value. */
5846
5847 static void
5848 record_jump_equiv (insn, taken)
5849 rtx insn;
5850 int taken;
5851 {
5852 int cond_known_true;
5853 rtx op0, op1;
5854 enum machine_mode mode, mode0, mode1;
5855 int reversed_nonequality = 0;
5856 enum rtx_code code;
5857
5858 /* Ensure this is the right kind of insn. */
5859 if (! condjump_p (insn) || simplejump_p (insn))
5860 return;
5861
5862 /* See if this jump condition is known true or false. */
5863 if (taken)
5864 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5865 else
5866 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
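/* Illustration: a conditional branch usually has the form
   (set (pc) (if_then_else COND (label_ref L) (pc))),
   where following the taken arm means COND held; in the
   interchanged form (if_then_else COND (pc) (label_ref L)),
   it is the fall-through path on which COND held.  */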
5867
5868 /* Get the type of comparison being done and the operands being compared.
5869 If we had to reverse a non-equality condition, record that fact so we
5870 know that it isn't valid for floating-point. */
5871 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5872 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5873 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5874
5875 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5876 if (! cond_known_true)
5877 {
5878 reversed_nonequality = (code != EQ && code != NE);
5879 code = reverse_condition (code);
5880 }
5881
5882 /* The mode is the mode of the non-constant. */
5883 mode = mode0;
5884 if (mode1 != VOIDmode)
5885 mode = mode1;
5886
5887 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5888 }
5889
5890 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5891 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5892 Make any useful entries we can with that information. Called from
5893 above function and called recursively. */
5894
5895 static void
5896 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5897 enum rtx_code code;
5898 enum machine_mode mode;
5899 rtx op0, op1;
5900 int reversed_nonequality;
5901 {
5902 unsigned op0_hash, op1_hash;
5903 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5904 struct table_elt *op0_elt, *op1_elt;
5905
5906 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5907 we know that they are also equal in the smaller mode (this is also
5908 true for all smaller modes whether or not there is a SUBREG, but
5909 is not worth testing for with no SUBREG). */
5910
5911 /* Note that GET_MODE (op0) may not equal MODE. */
5912 if (code == EQ && GET_CODE (op0) == SUBREG
5913 && (GET_MODE_SIZE (GET_MODE (op0))
5914 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5915 {
5916 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5917 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5918
5919 record_jump_cond (code, mode, SUBREG_REG (op0),
5920 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5921 reversed_nonequality);
5922 }
5923
5924 if (code == EQ && GET_CODE (op1) == SUBREG
5925 && (GET_MODE_SIZE (GET_MODE (op1))
5926 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5927 {
5928 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5929 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5930
5931 record_jump_cond (code, mode, SUBREG_REG (op1),
5932 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5933 reversed_nonequality);
5934 }
5935
5936 /* Similarly, if this is an NE comparison, and either is a SUBREG
5937 making a smaller mode, we know the whole thing is also NE. */
5938
5939 /* Note that GET_MODE (op0) may not equal MODE;
5940 if we test MODE instead, we can get an infinite recursion
5941 alternating between two modes each wider than MODE. */
5942
5943 if (code == NE && GET_CODE (op0) == SUBREG
5944 && subreg_lowpart_p (op0)
5945 && (GET_MODE_SIZE (GET_MODE (op0))
5946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5947 {
5948 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5949 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5950
5951 record_jump_cond (code, mode, SUBREG_REG (op0),
5952 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5953 reversed_nonequality);
5954 }
5955
5956 if (code == NE && GET_CODE (op1) == SUBREG
5957 && subreg_lowpart_p (op1)
5958 && (GET_MODE_SIZE (GET_MODE (op1))
5959 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5960 {
5961 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5962 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5963
5964 record_jump_cond (code, mode, SUBREG_REG (op1),
5965 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5966 reversed_nonequality);
5967 }
5968
5969 /* Hash both operands. */
5970
5971 do_not_record = 0;
5972 hash_arg_in_memory = 0;
5973 hash_arg_in_struct = 0;
5974 op0_hash = HASH (op0, mode);
5975 op0_in_memory = hash_arg_in_memory;
5976 op0_in_struct = hash_arg_in_struct;
5977
5978 if (do_not_record)
5979 return;
5980
5981 do_not_record = 0;
5982 hash_arg_in_memory = 0;
5983 hash_arg_in_struct = 0;
5984 op1_hash = HASH (op1, mode);
5985 op1_in_memory = hash_arg_in_memory;
5986 op1_in_struct = hash_arg_in_struct;
5987
5988 if (do_not_record)
5989 return;
5990
5991 /* Look up both operands. */
5992 op0_elt = lookup (op0, op0_hash, mode);
5993 op1_elt = lookup (op1, op1_hash, mode);
5994
5995 /* If both operands are already equivalent or if they are not in the
5996 table but are identical, do nothing. */
5997 if ((op0_elt != 0 && op1_elt != 0
5998 && op0_elt->first_same_value == op1_elt->first_same_value)
5999 || op0 == op1 || rtx_equal_p (op0, op1))
6000 return;
6001
6002 /* If we aren't setting two things equal all we can do is save this
6003 comparison. Similarly if this is floating-point. In the latter
6004 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
6005 If we record the equality, we might inadvertently delete code
6006 whose intent was to change -0 to +0. */
6007
6008 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
6009 {
6010 /* If we reversed a floating-point comparison, if OP0 is not a
6011 register, or if OP1 is neither a register nor a constant, we can't
6012 do anything. */
6013
6014 if (GET_CODE (op1) != REG)
6015 op1 = equiv_constant (op1);
6016
6017 if ((reversed_nonequality && FLOAT_MODE_P (mode))
6018 || GET_CODE (op0) != REG || op1 == 0)
6019 return;
6020
6021 /* Put OP0 in the hash table if it isn't already. This gives it a
6022 new quantity number. */
6023 if (op0_elt == 0)
6024 {
6025 if (insert_regs (op0, NULL_PTR, 0))
6026 {
6027 rehash_using_reg (op0);
6028 op0_hash = HASH (op0, mode);
6029
6030 /* If OP0 is contained in OP1, this changes its hash code
6031 as well. Faster to rehash than to check, except
6032 for the simple case of a constant. */
6033 if (! CONSTANT_P (op1))
6034 op1_hash = HASH (op1, mode);
6035 }
6036
6037 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6038 op0_elt->in_memory = op0_in_memory;
6039 op0_elt->in_struct = op0_in_struct;
6040 }
6041
6042 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
6043 if (GET_CODE (op1) == REG)
6044 {
6045 /* Look it up again--in case op0 and op1 are the same. */
6046 op1_elt = lookup (op1, op1_hash, mode);
6047
6048 /* Put OP1 in the hash table so it gets a new quantity number. */
6049 if (op1_elt == 0)
6050 {
6051 if (insert_regs (op1, NULL_PTR, 0))
6052 {
6053 rehash_using_reg (op1);
6054 op1_hash = HASH (op1, mode);
6055 }
6056
6057 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6058 op1_elt->in_memory = op1_in_memory;
6059 op1_elt->in_struct = op1_in_struct;
6060 }
6061
6062 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
6063 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
6064 }
6065 else
6066 {
6067 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
6068 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
6069 }
6070
6071 return;
6072 }
6073
6074 /* If either side is still missing an equivalence, make it now,
6075 then merge the equivalences. */
6076
6077 if (op0_elt == 0)
6078 {
6079 if (insert_regs (op0, NULL_PTR, 0))
6080 {
6081 rehash_using_reg (op0);
6082 op0_hash = HASH (op0, mode);
6083 }
6084
6085 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6086 op0_elt->in_memory = op0_in_memory;
6087 op0_elt->in_struct = op0_in_struct;
6088 }
6089
6090 if (op1_elt == 0)
6091 {
6092 if (insert_regs (op1, NULL_PTR, 0))
6093 {
6094 rehash_using_reg (op1);
6095 op1_hash = HASH (op1, mode);
6096 }
6097
6098 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6099 op1_elt->in_memory = op1_in_memory;
6100 op1_elt->in_struct = op1_in_struct;
6101 }
6102
6103 merge_equiv_classes (op0_elt, op1_elt);
6104 last_jump_equiv_class = op0_elt;
6105 }
6106 \f
6107 /* CSE processing for one instruction.
6108 First simplify sources and addresses of all assignments
6109 in the instruction, using previously-computed equivalent values.
6110 Then install the new sources and destinations in the table
6111 of available values.
6112
6113 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
6114 the insn. */
6115
6116 /* Data on one SET contained in the instruction. */
6117
6118 struct set
6119 {
6120 /* The SET rtx itself. */
6121 rtx rtl;
6122 /* The SET_SRC of the rtx (the original value, if it is changing). */
6123 rtx src;
6124 /* The hash-table element for the SET_SRC of the SET. */
6125 struct table_elt *src_elt;
6126 /* Hash value for the SET_SRC. */
6127 unsigned src_hash;
6128 /* Hash value for the SET_DEST. */
6129 unsigned dest_hash;
6130 /* The SET_DEST, with SUBREG, etc., stripped. */
6131 rtx inner_dest;
6132 /* Place where the pointer to the INNER_DEST was found. */
6133 rtx *inner_dest_loc;
6134 /* Nonzero if the SET_SRC is in memory. */
6135 char src_in_memory;
6136 /* Nonzero if the SET_SRC is in a structure. */
6137 char src_in_struct;
6138 /* Nonzero if the SET_SRC contains something
6139 whose value cannot be predicted and understood. */
6140 char src_volatile;
6141 /* Original machine mode, in case it becomes a CONST_INT. */
6142 enum machine_mode mode;
6143 /* A constant equivalent for SET_SRC, if any. */
6144 rtx src_const;
6145 /* Hash value of constant equivalent for SET_SRC. */
6146 unsigned src_const_hash;
6147 /* Table entry for constant equivalent for SET_SRC, if any. */
6148 struct table_elt *src_const_elt;
6149 };
6150
6151 static void
6152 cse_insn (insn, in_libcall_block)
6153 rtx insn;
6154 int in_libcall_block;
6155 {
6156 register rtx x = PATTERN (insn);
6157 register int i;
6158 rtx tem;
6159 register int n_sets = 0;
6160
6161 /* Records what this insn does to set CC0. */
6162 rtx this_insn_cc0 = 0;
6163 enum machine_mode this_insn_cc0_mode;
6164 struct write_data writes_memory;
6165 static struct write_data init = {0, 0, 0, 0};
6166
6167 rtx src_eqv = 0;
6168 struct table_elt *src_eqv_elt = 0;
6169 int src_eqv_volatile;
6170 int src_eqv_in_memory;
6171 int src_eqv_in_struct;
6172 unsigned src_eqv_hash;
6173
6174 struct set *sets;
6175
6176 this_insn = insn;
6177 writes_memory = init;
6178
6179 /* Find all the SETs and CLOBBERs in this instruction.
6180 Record all the SETs in the array `set' and count them.
6181 Also determine whether there is a CLOBBER that invalidates
6182 all memory references, or all references at varying addresses. */
6183
6184 if (GET_CODE (insn) == CALL_INSN)
6185 {
6186 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6187 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6188 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6189 }
6190
6191 if (GET_CODE (x) == SET)
6192 {
6193 sets = (struct set *) alloca (sizeof (struct set));
6194 sets[0].rtl = x;
6195
6196 /* Ignore SETs that are unconditional jumps.
6197 They never need cse processing, so this does not hurt.
6198 The reason is not efficiency but rather
6199 so that we can test at the end for instructions
6200 that have been simplified to unconditional jumps
6201 and not be misled by unchanged instructions
6202 that were unconditional jumps to begin with. */
6203 if (SET_DEST (x) == pc_rtx
6204 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6205 ;
6206
6207 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6208 The hard function value register is used only once, to copy to
6209 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6210 Ensure we invalidate the destination register. On the 80386 no
6211 other code would invalidate it since it is a fixed_reg.
6212 We need not check the return of apply_change_group; see canon_reg. */
6213
6214 else if (GET_CODE (SET_SRC (x)) == CALL)
6215 {
6216 canon_reg (SET_SRC (x), insn);
6217 apply_change_group ();
6218 fold_rtx (SET_SRC (x), insn);
6219 invalidate (SET_DEST (x), VOIDmode);
6220 }
6221 else
6222 n_sets = 1;
6223 }
6224 else if (GET_CODE (x) == PARALLEL)
6225 {
6226 register int lim = XVECLEN (x, 0);
6227
6228 sets = (struct set *) alloca (lim * sizeof (struct set));
6229
6230 /* Find all regs explicitly clobbered in this insn,
6231 and ensure they are not replaced with any other regs
6232 elsewhere in this insn.
6233 When a reg that is clobbered is also used for input,
6234 we should presume that that is for a reason,
6235 and we should not substitute some other register
6236 which is not supposed to be clobbered.
6237 Therefore, this loop cannot be merged into the one below
6238 because a CALL may precede a CLOBBER and refer to the
6239 value clobbered. We must not let a canonicalization do
6240 anything in that case. */
6241 for (i = 0; i < lim; i++)
6242 {
6243 register rtx y = XVECEXP (x, 0, i);
6244 if (GET_CODE (y) == CLOBBER)
6245 {
6246 rtx clobbered = XEXP (y, 0);
6247
6248 if (GET_CODE (clobbered) == REG
6249 || GET_CODE (clobbered) == SUBREG)
6250 invalidate (clobbered, VOIDmode);
6251 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6252 || GET_CODE (clobbered) == ZERO_EXTRACT)
6253 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6254 }
6255 }
6256
6257 for (i = 0; i < lim; i++)
6258 {
6259 register rtx y = XVECEXP (x, 0, i);
6260 if (GET_CODE (y) == SET)
6261 {
6262 /* As above, we ignore unconditional jumps and call-insns and
6263 ignore the result of apply_change_group. */
6264 if (GET_CODE (SET_SRC (y)) == CALL)
6265 {
6266 canon_reg (SET_SRC (y), insn);
6267 apply_change_group ();
6268 fold_rtx (SET_SRC (y), insn);
6269 invalidate (SET_DEST (y), VOIDmode);
6270 }
6271 else if (SET_DEST (y) == pc_rtx
6272 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6273 ;
6274 else
6275 sets[n_sets++].rtl = y;
6276 }
6277 else if (GET_CODE (y) == CLOBBER)
6278 {
6279 /* If we clobber memory, take note of that,
6280 and canon the address.
6281 This does nothing when a register is clobbered
6282 because we have already invalidated the reg. */
6283 if (GET_CODE (XEXP (y, 0)) == MEM)
6284 {
6285 canon_reg (XEXP (y, 0), NULL_RTX);
6286 note_mem_written (XEXP (y, 0), &writes_memory);
6287 }
6288 }
6289 else if (GET_CODE (y) == USE
6290 && ! (GET_CODE (XEXP (y, 0)) == REG
6291 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6292 canon_reg (y, NULL_RTX);
6293 else if (GET_CODE (y) == CALL)
6294 {
6295 /* The result of apply_change_group can be ignored; see
6296 canon_reg. */
6297 canon_reg (y, insn);
6298 apply_change_group ();
6299 fold_rtx (y, insn);
6300 }
6301 }
6302 }
6303 else if (GET_CODE (x) == CLOBBER)
6304 {
6305 if (GET_CODE (XEXP (x, 0)) == MEM)
6306 {
6307 canon_reg (XEXP (x, 0), NULL_RTX);
6308 note_mem_written (XEXP (x, 0), &writes_memory);
6309 }
6310 }
6311
6312 /* Canonicalize a USE of a pseudo register or memory location. */
6313 else if (GET_CODE (x) == USE
6314 && ! (GET_CODE (XEXP (x, 0)) == REG
6315 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6316 canon_reg (XEXP (x, 0), NULL_RTX);
6317 else if (GET_CODE (x) == CALL)
6318 {
6319 /* The result of apply_change_group can be ignored; see canon_reg. */
6320 canon_reg (x, insn);
6321 apply_change_group ();
6322 fold_rtx (x, insn);
6323 }
6324
6325 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6326 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6327 is handled specially for this case, and if it isn't set, then there will
6328 be no equivalence for the destination. */
6329 if (n_sets == 1 && REG_NOTES (insn) != 0
6330 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6331 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6332 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6333 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6334
6335 /* Canonicalize sources and addresses of destinations.
6336 We do this in a separate pass to avoid problems when a MATCH_DUP is
6337 present in the insn pattern. In that case, we want to ensure that
6338 we don't break the duplicate nature of the pattern. So we will replace
6339 both operands at the same time. Otherwise, we would fail to find an
6340 equivalent substitution in the loop calling validate_change below.
6341
6342 We used to suppress canonicalization of DEST if it appears in SRC,
6343 but we don't do this any more. */
6344
6345 for (i = 0; i < n_sets; i++)
6346 {
6347 rtx dest = SET_DEST (sets[i].rtl);
6348 rtx src = SET_SRC (sets[i].rtl);
6349 rtx new = canon_reg (src, insn);
6350 int insn_code;
6351
6352 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6353 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6354 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6355 || (insn_code = recog_memoized (insn)) < 0
6356 || insn_n_dups[insn_code] > 0)
6357 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6358 else
6359 SET_SRC (sets[i].rtl) = new;
6360
6361 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6362 {
6363 validate_change (insn, &XEXP (dest, 1),
6364 canon_reg (XEXP (dest, 1), insn), 1);
6365 validate_change (insn, &XEXP (dest, 2),
6366 canon_reg (XEXP (dest, 2), insn), 1);
6367 }
6368
6369 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6370 || GET_CODE (dest) == ZERO_EXTRACT
6371 || GET_CODE (dest) == SIGN_EXTRACT)
6372 dest = XEXP (dest, 0);
6373
6374 if (GET_CODE (dest) == MEM)
6375 canon_reg (dest, insn);
6376 }
6377
6378 /* Now that we have done all the replacements, we can apply the change
6379 group and see if they all work. Note that this will cause some
6380 canonicalizations that would have worked individually not to be applied
6381 because some other canonicalization didn't work, but this should not
6382 occur often.
6383
6384 The result of apply_change_group can be ignored; see canon_reg. */
6385
6386 apply_change_group ();
6387
6388 /* Set sets[i].src_elt to the class each source belongs to.
6389 Detect assignments from or to volatile things
6390 and set sets[i] to zero so they will be ignored
6391 in the rest of this function.
6392
6393 Nothing in this loop changes the hash table or the register chains. */
6394
6395 for (i = 0; i < n_sets; i++)
6396 {
6397 register rtx src, dest;
6398 register rtx src_folded;
6399 register struct table_elt *elt = 0, *p;
6400 enum machine_mode mode;
6401 rtx src_eqv_here;
6402 rtx src_const = 0;
6403 rtx src_related = 0;
6404 struct table_elt *src_const_elt = 0;
6405 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6406 int src_related_cost = 10000, src_elt_cost = 10000;
6407 /* Set non-zero if we need to call force_const_mem on the
6408 contents of src_folded before using it. */
6409 int src_folded_force_flag = 0;
6410
6411 dest = SET_DEST (sets[i].rtl);
6412 src = SET_SRC (sets[i].rtl);
6413
6414 /* If SRC is a constant that has no machine mode,
6415 hash it with the destination's machine mode.
6416 This way we can keep different modes separate. */
6417
6418 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6419 sets[i].mode = mode;
6420
6421 if (src_eqv)
6422 {
6423 enum machine_mode eqvmode = mode;
6424 if (GET_CODE (dest) == STRICT_LOW_PART)
6425 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6426 do_not_record = 0;
6427 hash_arg_in_memory = 0;
6428 hash_arg_in_struct = 0;
6429 src_eqv = fold_rtx (src_eqv, insn);
6430 src_eqv_hash = HASH (src_eqv, eqvmode);
6431
6432 /* Find the equivalence class for the equivalent expression. */
6433
6434 if (!do_not_record)
6435 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6436
6437 src_eqv_volatile = do_not_record;
6438 src_eqv_in_memory = hash_arg_in_memory;
6439 src_eqv_in_struct = hash_arg_in_struct;
6440 }
6441
6442 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6443 value of the INNER register, not the destination. So it is not
6444 a valid substitution for the source. But save it for later. */
6445 if (GET_CODE (dest) == STRICT_LOW_PART)
6446 src_eqv_here = 0;
6447 else
6448 src_eqv_here = src_eqv;
6449
6450 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6451 simplified result, which need not be valid. */
6452 src_folded = fold_rtx (src, insn);
6453
6454 #if 0
6455 /* ??? This caused bad code to be generated for the m68k port with -O2.
6456 Suppose src is (CONST_INT -1), and that after truncation src_folded
6457 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6458 At the end we will add src and src_const to the same equivalence
6459 class. We now have 3 and -1 on the same equivalence class. This
6460 causes later instructions to be mis-optimized. */
6461 /* If storing a constant in a bitfield, pre-truncate the constant
6462 so we will be able to record it later. */
6463 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6464 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6465 {
6466 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6467
6468 if (GET_CODE (src) == CONST_INT
6469 && GET_CODE (width) == CONST_INT
6470 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6471 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6472 src_folded
6473 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6474 << INTVAL (width)) - 1));
6475 }
6476 #endif
6477
6478 /* Compute SRC's hash code, and also notice if it
6479 should not be recorded at all. In that case,
6480 prevent any further processing of this assignment. */
6481 do_not_record = 0;
6482 hash_arg_in_memory = 0;
6483 hash_arg_in_struct = 0;
6484
6485 sets[i].src = src;
6486 sets[i].src_hash = HASH (src, mode);
6487 sets[i].src_volatile = do_not_record;
6488 sets[i].src_in_memory = hash_arg_in_memory;
6489 sets[i].src_in_struct = hash_arg_in_struct;
6490
6491 /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
6492 a pseudo that is set more than once, do not record SRC. Using
6493 SRC as a replacement for anything else will be incorrect in that
6494 situation. Note that this usually occurs only for stack slots,
6495 in which case all the RTL would be referring to SRC, so we don't
6496 lose any optimization opportunities by not having SRC in the
6497 hash table. */
6498
6499 if (GET_CODE (src) == MEM
6500 && find_reg_note (insn, REG_EQUIV, src) != 0
6501 && GET_CODE (dest) == REG
6502 && REGNO (dest) >= FIRST_PSEUDO_REGISTER
6503 && REG_N_SETS (REGNO (dest)) != 1)
6504 sets[i].src_volatile = 1;
6505
6506 #if 0
6507 /* It is no longer clear why we used to do this, but it doesn't
6508 appear to still be needed. So let's try without it since this
6509 code hurts cse'ing widened ops. */
6510 /* If source is a perverse subreg (such as QI treated as an SI),
6511 treat it as volatile. It may do the work of an SI in one context
6512 where the extra bits are not being used, but cannot replace an SI
6513 in general. */
6514 if (GET_CODE (src) == SUBREG
6515 && (GET_MODE_SIZE (GET_MODE (src))
6516 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6517 sets[i].src_volatile = 1;
6518 #endif
6519
6520 /* Locate all possible equivalent forms for SRC. Try to replace
6521 SRC in the insn with each cheaper equivalent.
6522
6523 We have the following types of equivalents: SRC itself, a folded
6524 version, a value given in a REG_EQUAL note, or a value related
6525 to a constant.
6526
6527 Each of these equivalents may be part of an additional class
6528 of equivalents (if more than one is in the table, they must be in
6529 the same class; we check for this).
6530
6531 If the source is volatile, we don't do any table lookups.
6532
6533 We note any constant equivalent for possible later use in a
6534 REG_NOTE. */
6535
6536 if (!sets[i].src_volatile)
6537 elt = lookup (src, sets[i].src_hash, mode);
6538
6539 sets[i].src_elt = elt;
6540
6541 if (elt && src_eqv_here && src_eqv_elt)
6542 {
6543 if (elt->first_same_value != src_eqv_elt->first_same_value)
6544 {
6545 /* The REG_EQUAL is indicating that two formerly distinct
6546 classes are now equivalent. So merge them. */
6547 merge_equiv_classes (elt, src_eqv_elt);
6548 src_eqv_hash = HASH (src_eqv, elt->mode);
6549 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6550 }
6551
6552 src_eqv_here = 0;
6553 }
6554
6555 else if (src_eqv_elt)
6556 elt = src_eqv_elt;
6557
6558 /* Try to find a constant somewhere and record it in `src_const'.
6559 Record its table element, if any, in `src_const_elt'. Look in
6560 any known equivalences first. (If the constant is not in the
6561 table, also set `sets[i].src_const_hash'). */
6562 if (elt)
6563 for (p = elt->first_same_value; p; p = p->next_same_value)
6564 if (p->is_const)
6565 {
6566 src_const = p->exp;
6567 src_const_elt = elt;
6568 break;
6569 }
6570
6571 if (src_const == 0
6572 && (CONSTANT_P (src_folded)
6573 /* Consider (minus (label_ref L1) (label_ref L2)) as
6574 "constant" here so we will record it. This allows us
6575 to fold switch statements when an ADDR_DIFF_VEC is used. */
6576 || (GET_CODE (src_folded) == MINUS
6577 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6578 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6579 src_const = src_folded, src_const_elt = elt;
6580 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6581 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6582
6583 /* If we don't know if the constant is in the table, get its
6584 hash code and look it up. */
6585 if (src_const && src_const_elt == 0)
6586 {
6587 sets[i].src_const_hash = HASH (src_const, mode);
6588 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6589 }
6590
6591 sets[i].src_const = src_const;
6592 sets[i].src_const_elt = src_const_elt;
6593
6594 /* If the constant and our source are both in the table, mark them as
6595 equivalent. Otherwise, if a constant is in the table but the source
6596 isn't, set ELT to it. */
6597 if (src_const_elt && elt
6598 && src_const_elt->first_same_value != elt->first_same_value)
6599 merge_equiv_classes (elt, src_const_elt);
6600 else if (src_const_elt && elt == 0)
6601 elt = src_const_elt;
6602
6603 /* See if there is a register linearly related to a constant
6604 equivalent of SRC. */
6605 if (src_const
6606 && (GET_CODE (src_const) == CONST
6607 || (src_const_elt && src_const_elt->related_value != 0)))
6608 {
6609 src_related = use_related_value (src_const, src_const_elt);
6610 if (src_related)
6611 {
6612 struct table_elt *src_related_elt
6613 = lookup (src_related, HASH (src_related, mode), mode);
6614 if (src_related_elt && elt)
6615 {
6616 if (elt->first_same_value
6617 != src_related_elt->first_same_value)
6618 /* This can occur when we previously saw a CONST
6619 involving a SYMBOL_REF and then see the SYMBOL_REF
6620 twice. Merge the involved classes. */
6621 merge_equiv_classes (elt, src_related_elt);
6622
6623 src_related = 0;
6624 src_related_elt = 0;
6625 }
6626 else if (src_related_elt && elt == 0)
6627 elt = src_related_elt;
6628 }
6629 }
6630
6631 /* See if we have a CONST_INT that is already in a register in a
6632 wider mode. */
6633
6634 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6635 && GET_MODE_CLASS (mode) == MODE_INT
6636 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6637 {
6638 enum machine_mode wider_mode;
6639
6640 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6641 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6642 && src_related == 0;
6643 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6644 {
6645 struct table_elt *const_elt
6646 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6647
6648 if (const_elt == 0)
6649 continue;
6650
6651 for (const_elt = const_elt->first_same_value;
6652 const_elt; const_elt = const_elt->next_same_value)
6653 if (GET_CODE (const_elt->exp) == REG)
6654 {
6655 src_related = gen_lowpart_if_possible (mode,
6656 const_elt->exp);
6657 break;
6658 }
6659 }
6660 }
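/* Illustration (register hypothetical): if we need (const_int 10)
   in QImode and (reg:SI 100) is already known to hold that value,
   a lowpart SUBREG of (reg:SI 100) is used rather than reloading
   the constant.  */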
6661
6662 /* Another possibility is that we have an AND with a constant in
6663 a mode narrower than a word. If so, it might have been generated
6664 as part of an "if" which would narrow the AND. If we already
6665 have done the AND in a wider mode, we can use a SUBREG of that
6666 value. */
6667
6668 if (flag_expensive_optimizations && ! src_related
6669 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6670 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6671 {
6672 enum machine_mode tmode;
6673 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6674
6675 for (tmode = GET_MODE_WIDER_MODE (mode);
6676 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6677 tmode = GET_MODE_WIDER_MODE (tmode))
6678 {
6679 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6680 struct table_elt *larger_elt;
6681
6682 if (inner)
6683 {
6684 PUT_MODE (new_and, tmode);
6685 XEXP (new_and, 0) = inner;
6686 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6687 if (larger_elt == 0)
6688 continue;
6689
6690 for (larger_elt = larger_elt->first_same_value;
6691 larger_elt; larger_elt = larger_elt->next_same_value)
6692 if (GET_CODE (larger_elt->exp) == REG)
6693 {
6694 src_related
6695 = gen_lowpart_if_possible (mode, larger_elt->exp);
6696 break;
6697 }
6698
6699 if (src_related)
6700 break;
6701 }
6702 }
6703 }
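/* Illustration: for (and:QI y (const_int 15)) where y is really the
   low part of an SImode register r, if (and:SI r (const_int 15)) was
   already computed into some register, the lowpart of that result
   serves here, since the masked-off high bits cannot matter.  */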
6704
6705 #ifdef LOAD_EXTEND_OP
6706 /* See if a MEM has already been loaded with a widening operation;
6707 if it has, we can use a subreg of that. Many CISC machines
6708 also have such operations, but this is only likely to be
6709 beneficial on these machines. */
6710
6711 if (flag_expensive_optimizations && src_related == 0
6712 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6713 && GET_MODE_CLASS (mode) == MODE_INT
6714 && GET_CODE (src) == MEM && ! do_not_record
6715 && LOAD_EXTEND_OP (mode) != NIL)
6716 {
6717 enum machine_mode tmode;
6718
6719 /* Set what we are trying to extend and the operation it might
6720 have been extended with. */
6721 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6722 XEXP (memory_extend_rtx, 0) = src;
6723
6724 for (tmode = GET_MODE_WIDER_MODE (mode);
6725 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6726 tmode = GET_MODE_WIDER_MODE (tmode))
6727 {
6728 struct table_elt *larger_elt;
6729
6730 PUT_MODE (memory_extend_rtx, tmode);
6731 larger_elt = lookup (memory_extend_rtx,
6732 HASH (memory_extend_rtx, tmode), tmode);
6733 if (larger_elt == 0)
6734 continue;
6735
6736 for (larger_elt = larger_elt->first_same_value;
6737 larger_elt; larger_elt = larger_elt->next_same_value)
6738 if (GET_CODE (larger_elt->exp) == REG)
6739 {
6740 src_related = gen_lowpart_if_possible (mode,
6741 larger_elt->exp);
6742 break;
6743 }
6744
6745 if (src_related)
6746 break;
6747 }
6748 }
6749 #endif /* LOAD_EXTEND_OP */
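/* Illustration (register and address hypothetical): on a machine
   where LOAD_EXTEND_OP (HImode) == SIGN_EXTEND, if (reg:SI 100) is
   known equivalent to (sign_extend:SI (mem:HI addr)), then an
   HImode load of the same address can use the lowpart of
   (reg:SI 100) instead of reading memory again.  */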
6750
6751 if (src == src_folded)
6752 src_folded = 0;
6753
6754 /* At this point, ELT, if non-zero, points to a class of expressions
6755 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6756 and SRC_RELATED, if non-zero, each contain additional equivalent
6757 expressions. Prune these latter expressions by deleting expressions
6758 already in the equivalence class.
6759
6760 Check for an equivalent identical to the destination. If found,
6761 this is the preferred equivalent since it will likely lead to
6762 elimination of the insn. Indicate this by placing it in
6763 `src_related'. */
6764
6765 if (elt) elt = elt->first_same_value;
6766 for (p = elt; p; p = p->next_same_value)
6767 {
6768 enum rtx_code code = GET_CODE (p->exp);
6769
6770 /* If the expression is not valid, ignore it. Then we do not
6771 have to check for validity below. In most cases, we can use
6772 `rtx_equal_p', since canonicalization has already been done. */
6773 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6774 continue;
6775
6776 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6777 src = 0;
6778 else if (src_folded && GET_CODE (src_folded) == code
6779 && rtx_equal_p (src_folded, p->exp))
6780 src_folded = 0;
6781 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6782 && rtx_equal_p (src_eqv_here, p->exp))
6783 src_eqv_here = 0;
6784 else if (src_related && GET_CODE (src_related) == code
6785 && rtx_equal_p (src_related, p->exp))
6786 src_related = 0;
6787
6788 /* If this is the same as the destination of the insn, we want
6789 to prefer it. Copy it to src_related. The code below will
6790 then give it a negative cost. */
6791 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6792 src_related = dest;
6793
6794 }
6795
6796 /* Find the cheapest valid equivalent, trying all the available
6797 possibilities. Prefer items not in the hash table to ones
6798 that are when they are equal cost. Note that we can never
6799 worsen an insn as the current contents will also succeed.
6800 If we find an equivalent identical to the destination, use it as best,
6801 since this insn will probably be eliminated in that case. */
6802 if (src)
6803 {
6804 if (rtx_equal_p (src, dest))
6805 src_cost = -1;
6806 else
6807 src_cost = COST (src);
6808 }
6809
6810 if (src_eqv_here)
6811 {
6812 if (rtx_equal_p (src_eqv_here, dest))
6813 src_eqv_cost = -1;
6814 else
6815 src_eqv_cost = COST (src_eqv_here);
6816 }
6817
6818 if (src_folded)
6819 {
6820 if (rtx_equal_p (src_folded, dest))
6821 src_folded_cost = -1;
6822 else
6823 src_folded_cost = COST (src_folded);
6824 }
6825
6826 if (src_related)
6827 {
6828 if (rtx_equal_p (src_related, dest))
6829 src_related_cost = -1;
6830 else
6831 src_related_cost = COST (src_related);
6832 }
6833
6834 /* If this was an indirect jump insn, a known label will really be
6835 cheaper even though it looks more expensive. */
6836 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6837 src_folded = src_const, src_folded_cost = -1;
6838
6839 /* Terminate loop when replacement made. This must terminate since
6840 the current contents will be tested and will always be valid. */
6841 while (1)
6842 {
6843 rtx trial;
6844
6845 /* Skip invalid entries. */
6846 while (elt && GET_CODE (elt->exp) != REG
6847 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6848 elt = elt->next_same_value;
6849
6850 if (elt) src_elt_cost = elt->cost;
6851
6852 /* Find cheapest and skip it for the next time. For items
6853 of equal cost, use this order:
6854 src_folded, src, src_eqv, src_related and hash table entry. */
6855 if (src_folded_cost <= src_cost
6856 && src_folded_cost <= src_eqv_cost
6857 && src_folded_cost <= src_related_cost
6858 && src_folded_cost <= src_elt_cost)
6859 {
6860 trial = src_folded, src_folded_cost = 10000;
6861 if (src_folded_force_flag)
6862 trial = force_const_mem (mode, trial);
6863 }
6864 else if (src_cost <= src_eqv_cost
6865 && src_cost <= src_related_cost
6866 && src_cost <= src_elt_cost)
6867 trial = src, src_cost = 10000;
6868 else if (src_eqv_cost <= src_related_cost
6869 && src_eqv_cost <= src_elt_cost)
6870 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6871 else if (src_related_cost <= src_elt_cost)
6872 trial = copy_rtx (src_related), src_related_cost = 10000;
6873 else
6874 {
6875 trial = copy_rtx (elt->exp);
6876 elt = elt->next_same_value;
6877 src_elt_cost = 10000;
6878 }
6879
6880 /* We don't normally have an insn matching (set (pc) (pc)), so
6881 check for this separately here. We will delete such an
6882 insn below.
6883
6884 Tablejump insns contain a USE of the table, so simply replacing
6885 the operand with the constant won't match. This is simply an
6886 unconditional branch, however, and is therefore valid. Just
6887 insert the substitution here and we will delete and re-emit
6888 the insn later. */
6889
6890 if (n_sets == 1 && dest == pc_rtx
6891 && (trial == pc_rtx
6892 || (GET_CODE (trial) == LABEL_REF
6893 && ! condjump_p (insn))))
6894 {
6895 /* If TRIAL is a label in front of a jump table, we are
6896 really falling through the switch (this is how casesi
6897 insns work), so we must branch around the table. */
6898 if (GET_CODE (trial) == CODE_LABEL
6899 && NEXT_INSN (trial) != 0
6900 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6901 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6902 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6903
6904 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6905
6906 SET_SRC (sets[i].rtl) = trial;
6907 cse_jumps_altered = 1;
6908 break;
6909 }
6910
6911 /* Look for a substitution that makes a valid insn. */
6912 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6913 {
6914 /* The result of apply_change_group can be ignored; see
6915 canon_reg. */
6916
6917 validate_change (insn, &SET_SRC (sets[i].rtl),
6918 canon_reg (SET_SRC (sets[i].rtl), insn),
6919 1);
6920 apply_change_group ();
6921 break;
6922 }
6923
6924 /* If we previously found constant pool entries for
6925 constants and this is a constant, try making a
6926 pool entry. Put it in src_folded unless we already have done
6927 this since that is where it likely came from. */
6928
6929 else if (constant_pool_entries_cost
6930 && CONSTANT_P (trial)
6931 && ! (GET_CODE (trial) == CONST
6932 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
6933 && (src_folded == 0
6934 || (GET_CODE (src_folded) != MEM
6935 && ! src_folded_force_flag))
6936 && GET_MODE_CLASS (mode) != MODE_CC)
6937 {
6938 src_folded_force_flag = 1;
6939 src_folded = trial;
6940 src_folded_cost = constant_pool_entries_cost;
6941 }
6942 }
6943
6944 src = SET_SRC (sets[i].rtl);
6945
6946 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6947 However, there is an important exception: If both are registers
6948 that are not the head of their equivalence class, replace SET_SRC
6949 with the head of the class. If we do not do this, we will have
6950 both registers live over a portion of the basic block. This way,
6951 their lifetimes will likely abut instead of overlapping. */
6952 if (GET_CODE (dest) == REG
6953 && REGNO_QTY_VALID_P (REGNO (dest))
6954 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6955 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6956 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6957 /* Don't do this if the original insn had a hard reg as
6958 SET_SRC. */
6959 && (GET_CODE (sets[i].src) != REG
6960 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6961 /* We can't call canon_reg here because it won't do anything if
6962 SRC is a hard register. */
6963 {
6964 int first = qty_first_reg[reg_qty[REGNO (src)]];
6965
6966 src = SET_SRC (sets[i].rtl)
6967 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6968 : gen_rtx (REG, GET_MODE (src), first);
6969
6970 /* If we had a constant that is cheaper than what we are now
6971 setting SRC to, use that constant. We ignored it when we
6972 thought we could make this into a no-op. */
6973 if (src_const && COST (src_const) < COST (src)
6974 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6975 src = src_const;
6976 }
6977
6978 /* If we made a change, recompute SRC values. */
6979 if (src != sets[i].src)
6980 {
6981 do_not_record = 0;
6982 hash_arg_in_memory = 0;
6983 hash_arg_in_struct = 0;
6984 sets[i].src = src;
6985 sets[i].src_hash = HASH (src, mode);
6986 sets[i].src_volatile = do_not_record;
6987 sets[i].src_in_memory = hash_arg_in_memory;
6988 sets[i].src_in_struct = hash_arg_in_struct;
6989 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
6990 }
6991
6992 /* If this is a single SET, we are setting a register, and we have an
6993 equivalent constant, we want to add a REG_NOTE. We don't want
6994 to write a REG_EQUAL note for a constant pseudo since verifying that
6995 that pseudo hasn't been eliminated is a pain. Such a note also
6996 won't help anything. */
6997 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6998 && GET_CODE (src_const) != REG)
6999 {
7000 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7001
7002 /* Record the actual constant value in a REG_EQUAL note, making
7003 a new one if one does not already exist. */
7004 if (tem)
7005 XEXP (tem, 0) = src_const;
7006 else
7007 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
7008 src_const, REG_NOTES (insn));
7009
7010 /* If storing a constant value in a register that
7011 previously held the constant value 0,
7012 record this fact with a REG_WAS_0 note on this insn.
7013
7014 Note that the *register* is required to have previously held 0,
7015 not just any register in the quantity, and we must point to the
7016 insn that set that register to zero.
7017
7018 Rather than track each register individually, we just see if
7019 the last set for this quantity was for this register. */
7020
7021 if (REGNO_QTY_VALID_P (REGNO (dest))
7022 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
7023 {
7024 /* See if we previously had a REG_WAS_0 note. */
7025 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7026 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
7027
7028 if ((tem = single_set (const_insn)) != 0
7029 && rtx_equal_p (SET_DEST (tem), dest))
7030 {
7031 if (note)
7032 XEXP (note, 0) = const_insn;
7033 else
7034 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
7035 const_insn, REG_NOTES (insn));
7036 }
7037 }
7038 }
7039
7040 /* Now deal with the destination. */
7041 do_not_record = 0;
7042 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
7043
7044 /* Look through any SIGN_EXTRACT, ZERO_EXTRACT, SUBREG or
7045 STRICT_LOW_PART to find the MEM or REG within it. */
7046 while (GET_CODE (dest) == SIGN_EXTRACT
7047 || GET_CODE (dest) == ZERO_EXTRACT
7048 || GET_CODE (dest) == SUBREG
7049 || GET_CODE (dest) == STRICT_LOW_PART)
7050 {
7051 sets[i].inner_dest_loc = &XEXP (dest, 0);
7052 dest = XEXP (dest, 0);
7053 }
7054
7055 sets[i].inner_dest = dest;
7056
7057 if (GET_CODE (dest) == MEM)
7058 {
7059 dest = fold_rtx (dest, insn);
7060
7061 /* Decide whether we invalidate everything in memory,
7062 or just things at non-fixed places.
7063 Writing a large aggregate must invalidate everything
7064 because we don't know how long it is. */
7065 note_mem_written (dest, &writes_memory);
7066 }
7067
7068 /* Compute the hash code of the destination now,
7069 before the effects of this instruction are recorded,
7070 since the register values used in the address computation
7071 are those before this instruction. */
7072 sets[i].dest_hash = HASH (dest, mode);
7073
7074 /* Don't enter a bit-field in the hash table
7075 because the value in it after the store
7076 may not equal what was stored, due to truncation. */
7077
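/* For instance (illustrative values): with a 3-bit field, storing
(const_int 5) is safe because 5 & (-1 << 3) == 0, whereas
(const_int 9) would be truncated to 1 and so must not be entered. */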
7078 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
7079 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
7080 {
7081 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
7082
7083 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
7084 && GET_CODE (width) == CONST_INT
7085 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
7086 && ! (INTVAL (src_const)
7087 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7088 /* Exception: if the value is constant,
7089 and it won't be truncated, record it. */
7090 ;
7091 else
7092 {
7093 /* This is chosen so that the destination will be invalidated
7094 but no new value will be recorded.
7095 We must invalidate because sometimes constant
7096 values can be recorded for bitfields. */
7097 sets[i].src_elt = 0;
7098 sets[i].src_volatile = 1;
7099 src_eqv = 0;
7100 src_eqv_elt = 0;
7101 }
7102 }
7103
7104 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7105 the insn. */
7106 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7107 {
7108 PUT_CODE (insn, NOTE);
7109 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7110 NOTE_SOURCE_FILE (insn) = 0;
7111 cse_jumps_altered = 1;
7112 /* One less use of the label this insn used to jump to. */
7113 --LABEL_NUSES (JUMP_LABEL (insn));
7114 /* No more processing for this set. */
7115 sets[i].rtl = 0;
7116 }
7117
7118 /* If this SET is now setting PC to a label, we know it used to
7119 be a conditional or computed branch. So we see if we can follow
7120 it. If it was a computed branch, delete it and re-emit. */
7121 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7122 {
7123 rtx p;
7124
7125 /* If this is not in the format for a simple branch and
7126 it is the only SET in the insn, re-emit it. */
7127 if (! simplejump_p (insn) && n_sets == 1)
7128 {
7129 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7130 JUMP_LABEL (new) = XEXP (src, 0);
7131 LABEL_NUSES (XEXP (src, 0))++;
7132 delete_insn (insn);
7133 insn = new;
7134 }
7135 else
7136 /* Otherwise, force rerecognition, since it probably had
7137 a different pattern before.
7138 This shouldn't really be necessary, since whatever
7139 changed the source value above should have done this.
7140 Until the right place is found, might as well do this here. */
7141 INSN_CODE (insn) = -1;
7142
7143 /* Now that we've converted this jump to an unconditional jump,
7144 there is dead code after it. Delete the dead code until we
7145 reach a BARRIER, the end of the function, or a label. Do
7146 not delete NOTEs except for NOTE_INSN_DELETED since later
7147 phases assume these notes are retained. */
7148
7149 p = insn;
7150
7151 while (NEXT_INSN (p) != 0
7152 && GET_CODE (NEXT_INSN (p)) != BARRIER
7153 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7154 {
7155 if (GET_CODE (NEXT_INSN (p)) != NOTE
7156 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7157 delete_insn (NEXT_INSN (p));
7158 else
7159 p = NEXT_INSN (p);
7160 }
7161
7162 /* If we don't have a BARRIER immediately after INSN, put one there.
7163 Much code assumes that there are no NOTEs between a JUMP_INSN and
7164 BARRIER. */
7165
7166 if (NEXT_INSN (insn) == 0
7167 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7168 emit_barrier_before (NEXT_INSN (insn));
7169
7170 /* We might have two BARRIERs separated by notes. Delete the second
7171 one if so. */
7172
7173 if (p != insn && NEXT_INSN (p) != 0
7174 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7175 delete_insn (NEXT_INSN (p));
7176
7177 cse_jumps_altered = 1;
7178 sets[i].rtl = 0;
7179 }
7180
7181 /* If destination is volatile, invalidate it and then do no further
7182 processing for this assignment. */
7183
7184 else if (do_not_record)
7185 {
7186 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7187 || GET_CODE (dest) == MEM)
7188 invalidate (dest, VOIDmode);
7189 else if (GET_CODE (dest) == STRICT_LOW_PART
7190 || GET_CODE (dest) == ZERO_EXTRACT)
7191 invalidate (XEXP (dest, 0), GET_MODE (dest));
7192 sets[i].rtl = 0;
7193 }
7194
7195 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7196 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7197
7198 #ifdef HAVE_cc0
7199 /* If setting CC0, record what it was set to, or a constant, if it
7200 is equivalent to a constant. If it is being set to a floating-point
7201 value, make a COMPARE with the appropriate constant of 0. If we
7202 don't do this, later code can interpret this as a test against
7203 const0_rtx, which can cause problems if we try to put it into an
7204 insn as a floating-point operand. */
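/* For example (register number invented): after (set (cc0) (reg:SF 72))
we record (compare:VOIDmode (reg:SF 72) (const_double ... 0.0)) rather
than the bare register, so a later branch on cc0 is not folded as an
integer test against const0_rtx. */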
7205 if (dest == cc0_rtx)
7206 {
7207 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7208 this_insn_cc0_mode = mode;
7209 if (FLOAT_MODE_P (mode))
7210 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7211 CONST0_RTX (mode));
7212 }
7213 #endif
7214 }
7215
7216 /* Now enter all non-volatile source expressions in the hash table
7217 if they are not already present.
7218 Record their equivalence classes in src_elt.
7219 This way we can insert the corresponding destinations into
7220 the same classes even if the actual sources are no longer in them
7221 (having been invalidated). */
7222
7223 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7224 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7225 {
7226 register struct table_elt *elt;
7227 register struct table_elt *classp = sets[0].src_elt;
7228 rtx dest = SET_DEST (sets[0].rtl);
7229 enum machine_mode eqvmode = GET_MODE (dest);
7230
7231 if (GET_CODE (dest) == STRICT_LOW_PART)
7232 {
7233 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7234 classp = 0;
7235 }
7236 if (insert_regs (src_eqv, classp, 0))
7237 {
7238 rehash_using_reg (src_eqv);
7239 src_eqv_hash = HASH (src_eqv, eqvmode);
7240 }
7241 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7242 elt->in_memory = src_eqv_in_memory;
7243 elt->in_struct = src_eqv_in_struct;
7244 src_eqv_elt = elt;
7245
7246 /* Check to see if src_eqv_elt is the same as a set source which
7247 does not yet have an elt, and if so set the elt of the set source
7248 to src_eqv_elt. */
7249 for (i = 0; i < n_sets; i++)
7250 if (sets[i].rtl && sets[i].src_elt == 0
7251 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7252 sets[i].src_elt = src_eqv_elt;
7253 }
7254
7255 for (i = 0; i < n_sets; i++)
7256 if (sets[i].rtl && ! sets[i].src_volatile
7257 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7258 {
7259 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7260 {
7261 /* REG_EQUAL in setting a STRICT_LOW_PART
7262 gives an equivalent for the entire destination register,
7263 not just for the subreg being stored in now.
7264 This is a more interesting equivalence, so we arrange later
7265 to treat the entire reg as the destination. */
7266 sets[i].src_elt = src_eqv_elt;
7267 sets[i].src_hash = src_eqv_hash;
7268 }
7269 else
7270 {
7271 /* Insert source and constant equivalent into hash table, if not
7272 already present. */
7273 register struct table_elt *classp = src_eqv_elt;
7274 register rtx src = sets[i].src;
7275 register rtx dest = SET_DEST (sets[i].rtl);
7276 enum machine_mode mode
7277 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7278
7279 if (sets[i].src_elt == 0)
7280 {
7281 register struct table_elt *elt;
7282
7283 /* Note that these insert_regs calls cannot remove
7284 any of the src_elt's, because they would have failed to
7285 match if not still valid. */
7286 if (insert_regs (src, classp, 0))
7287 {
7288 rehash_using_reg (src);
7289 sets[i].src_hash = HASH (src, mode);
7290 }
7291 elt = insert (src, classp, sets[i].src_hash, mode);
7292 elt->in_memory = sets[i].src_in_memory;
7293 elt->in_struct = sets[i].src_in_struct;
7294 sets[i].src_elt = classp = elt;
7295 }
7296
7297 if (sets[i].src_const && sets[i].src_const_elt == 0
7298 && src != sets[i].src_const
7299 && ! rtx_equal_p (sets[i].src_const, src))
7300 sets[i].src_elt = insert (sets[i].src_const, classp,
7301 sets[i].src_const_hash, mode);
7302 }
7303 }
7304 else if (sets[i].src_elt == 0)
7305 /* If we did not insert the source into the hash table (e.g., it was
7306 volatile), note the equivalence class for the REG_EQUAL value, if any,
7307 so that the destination goes into that class. */
7308 sets[i].src_elt = src_eqv_elt;
7309
7310 invalidate_from_clobbers (&writes_memory, x);
7311
7312 /* Some registers are invalidated by subroutine calls. Memory is
7313 invalidated by non-constant calls. */
7314
7315 if (GET_CODE (insn) == CALL_INSN)
7316 {
7317 static struct write_data everything = {0, 1, 1, 1};
7318
7319 if (! CONST_CALL_P (insn))
7320 invalidate_memory (&everything);
7321 invalidate_for_call ();
7322 }
7323
7324 /* Now invalidate everything set by this instruction.
7325 If a SUBREG or other funny destination is being set,
7326 sets[i].rtl is still nonzero, so here we invalidate the reg
7327 a part of which is being set. */
7328
7329 for (i = 0; i < n_sets; i++)
7330 if (sets[i].rtl)
7331 {
7332 /* We can't use the inner dest, because the mode associated with
7333 a ZERO_EXTRACT is significant. */
7334 register rtx dest = SET_DEST (sets[i].rtl);
7335
7336 /* Needed for registers to remove the register from its
7337 previous quantity's chain.
7338 Needed for memory if this is a nonvarying address, unless
7339 we have just done an invalidate_memory that covers even those. */
7340 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7341 || (GET_CODE (dest) == MEM && ! writes_memory.all
7342 && ! cse_rtx_addr_varies_p (dest)))
7343 invalidate (dest, VOIDmode);
7344 else if (GET_CODE (dest) == STRICT_LOW_PART
7345 || GET_CODE (dest) == ZERO_EXTRACT)
7346 invalidate (XEXP (dest, 0), GET_MODE (dest));
7347 }
7348
7349 /* Make sure registers mentioned in destinations
7350 are safe for use in an expression to be inserted.
7351 This removes from the hash table
7352 any invalid entry that refers to one of these registers.
7353
7354 We don't care about the return value from mention_regs because
7355 we are going to hash the SET_DEST values unconditionally. */
7356
7357 for (i = 0; i < n_sets; i++)
7358 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7359 mention_regs (SET_DEST (sets[i].rtl));
7360
7361 /* We may have just removed some of the src_elt's from the hash table.
7362 So replace each one with the current head of the same class. */
7363
7364 for (i = 0; i < n_sets; i++)
7365 if (sets[i].rtl)
7366 {
7367 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7368 /* If elt was removed, find current head of same class,
7369 or 0 if nothing remains of that class. */
7370 {
7371 register struct table_elt *elt = sets[i].src_elt;
7372
7373 while (elt && elt->prev_same_value)
7374 elt = elt->prev_same_value;
7375
7376 while (elt && elt->first_same_value == 0)
7377 elt = elt->next_same_value;
7378 sets[i].src_elt = elt ? elt->first_same_value : 0;
7379 }
7380 }
7381
7382 /* Now insert the destinations into their equivalence classes. */
7383
7384 for (i = 0; i < n_sets; i++)
7385 if (sets[i].rtl)
7386 {
7387 register rtx dest = SET_DEST (sets[i].rtl);
7388 register struct table_elt *elt;
7389
7390 /* Don't record value if we are not supposed to risk allocating
7391 floating-point values in registers that might be wider than
7392 memory. */
7393 if ((flag_float_store
7394 && GET_CODE (dest) == MEM
7395 && FLOAT_MODE_P (GET_MODE (dest)))
7396 /* Don't record values of destinations set inside a libcall block
7397 since we might delete the libcall. Things should have been set
7398 up so we won't want to reuse such a value, but we play it safe
7399 here. */
7400 || in_libcall_block
7401 /* If we didn't put a REG_EQUAL value or a source into the hash
7402 table, there is no point in recording DEST. */
7403 || sets[i].src_elt == 0
7404 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7405 or SIGN_EXTEND, don't record DEST since it can cause
7406 some tracking to be wrong.
7407
7408 ??? Think about this more later. */
7409 || (GET_CODE (dest) == SUBREG
7410 && (GET_MODE_SIZE (GET_MODE (dest))
7411 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7412 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7413 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7414 continue;
7415
7416 /* STRICT_LOW_PART isn't part of the value BEING set,
7417 and neither is the SUBREG inside it.
7418 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7419 if (GET_CODE (dest) == STRICT_LOW_PART)
7420 dest = SUBREG_REG (XEXP (dest, 0));
7421
7422 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7423 /* Registers must also be inserted into chains for quantities. */
7424 if (insert_regs (dest, sets[i].src_elt, 1))
7425 {
7426 /* If `insert_regs' changes something, the hash code must be
7427 recalculated. */
7428 rehash_using_reg (dest);
7429 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7430 }
7431
7432 elt = insert (dest, sets[i].src_elt,
7433 sets[i].dest_hash, GET_MODE (dest));
7434 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7435 && (! RTX_UNCHANGING_P (sets[i].inner_dest)
7436 || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
7437 0))));
7438
7439 if (elt->in_memory)
7440 {
7441 /* This implicitly assumes a whole struct
7442 need not have MEM_IN_STRUCT_P.
7443 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7444 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7445 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7446 }
7447
7448 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7449 narrower than M2, and both M1 and M2 are the same number of words,
7450 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7451 make that equivalence as well.
7452
7453 However, gen_lowpart_if_possible applied to one of BAR's equivalences
7454 may produce a simpler value than gen_lowpart_if_possible applied to
7455 BAR itself (e.g., if BAR was ZERO_EXTENDed from M2), so we scan all
7456 of BAR's equivalences. If we don't get a simplified form, make
7457 the SUBREG. It will not be used in an equivalence, but will
7458 cause two similar assignments to be detected.
7459
7460 Note the loop below will find SUBREG_REG (DEST) since we have
7461 already entered SRC and DEST of the SET in the table. */
7462
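/* Concretely, on a hypothetical 32-bit target (UNITS_PER_WORD == 4):
(set (subreg:SI (reg:HI 70) 0) (reg:SI 71))
has M1 == SImode, M2 == HImode, both within one word, so we may also
record (set (reg:HI 70) (subreg:HI (reg:SI 71) 0)). */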
7463 if (GET_CODE (dest) == SUBREG
7464 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7465 / UNITS_PER_WORD)
7466 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7467 && (GET_MODE_SIZE (GET_MODE (dest))
7468 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7469 && sets[i].src_elt != 0)
7470 {
7471 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7472 struct table_elt *elt, *classp = 0;
7473
7474 for (elt = sets[i].src_elt->first_same_value; elt;
7475 elt = elt->next_same_value)
7476 {
7477 rtx new_src = 0;
7478 unsigned src_hash;
7479 struct table_elt *src_elt;
7480
7481 /* Ignore invalid entries. */
7482 if (GET_CODE (elt->exp) != REG
7483 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7484 continue;
7485
7486 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7487 if (new_src == 0)
7488 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7489
7490 src_hash = HASH (new_src, new_mode);
7491 src_elt = lookup (new_src, src_hash, new_mode);
7492
7493 /* Put the new source in the hash table if it isn't
7494 there already. */
7495 if (src_elt == 0)
7496 {
7497 if (insert_regs (new_src, classp, 0))
7498 {
7499 rehash_using_reg (new_src);
7500 src_hash = HASH (new_src, new_mode);
7501 }
7502 src_elt = insert (new_src, classp, src_hash, new_mode);
7503 src_elt->in_memory = elt->in_memory;
7504 src_elt->in_struct = elt->in_struct;
7505 }
7506 else if (classp && classp != src_elt->first_same_value)
7507 /* Show that two things that we've seen before are
7508 actually the same. */
7509 merge_equiv_classes (src_elt, classp);
7510
7511 classp = src_elt->first_same_value;
7512 }
7513 }
7514 }
7515
7516 /* Special handling for (set REG0 REG1)
7517 where REG0 is the "cheapest", cheaper than REG1.
7518 After cse, REG1 will probably not be used in the sequel,
7519 so (if easily done) change this insn to (set REG1 REG0) and
7520 replace REG1 with REG0 in the previous insn that computed their value.
7521 Then REG1 will become a dead store and won't cloud the situation
7522 for later optimizations.
7523
7524 Do not make this change if REG1 is a hard register, because it will
7525 then be used in the sequel and we may be changing a two-operand insn
7526 into a three-operand insn.
7527
7528 Also do not do this if we are operating on a copy of INSN. */
7529
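/* Sketch with invented pseudos, where reg 79 (REG0) is the cheaper one:
before: (set (reg 80) <expr>) ... (set (reg 79) (reg 80))
after: (set (reg 79) <expr>) ... (set (reg 80) (reg 79))
The second insn then becomes a dead store of reg 80. */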
7530 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7531 && NEXT_INSN (PREV_INSN (insn)) == insn
7532 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7533 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7534 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7535 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7536 == REGNO (SET_DEST (sets[0].rtl))))
7537 {
7538 rtx prev = PREV_INSN (insn);
7539 while (prev && GET_CODE (prev) == NOTE)
7540 prev = PREV_INSN (prev);
7541
7542 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7543 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7544 {
7545 rtx dest = SET_DEST (sets[0].rtl);
7546 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7547
7548 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7549 validate_change (insn, & SET_DEST (sets[0].rtl),
7550 SET_SRC (sets[0].rtl), 1);
7551 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7552 apply_change_group ();
7553
7554 /* If REG1 was equivalent to a constant, REG0 is not. */
7555 if (note)
7556 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7557
7558 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7559 any REG_WAS_0 note on INSN to PREV. */
7560 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7561 if (note)
7562 remove_note (prev, note);
7563
7564 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7565 if (note)
7566 {
7567 remove_note (insn, note);
7568 XEXP (note, 1) = REG_NOTES (prev);
7569 REG_NOTES (prev) = note;
7570 }
7571
7572 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7573 then we must delete it, because the value in REG0 has changed. */
7574 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7575 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7576 remove_note (insn, note);
7577 }
7578 }
7579
7580 /* If this is a conditional jump insn, record any known equivalences due to
7581 the condition being tested. */
7582
7583 last_jump_equiv_class = 0;
7584 if (GET_CODE (insn) == JUMP_INSN
7585 && n_sets == 1 && GET_CODE (x) == SET
7586 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7587 record_jump_equiv (insn, 0);
7588
7589 #ifdef HAVE_cc0
7590 /* If the previous insn set CC0 and this insn no longer references CC0,
7591 delete the previous insn. Here we use the fact that nothing expects CC0
7592 to be valid over an insn, which is true until the final pass. */
7593 if (prev_insn && GET_CODE (prev_insn) == INSN
7594 && (tem = single_set (prev_insn)) != 0
7595 && SET_DEST (tem) == cc0_rtx
7596 && ! reg_mentioned_p (cc0_rtx, x))
7597 {
7598 PUT_CODE (prev_insn, NOTE);
7599 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7600 NOTE_SOURCE_FILE (prev_insn) = 0;
7601 }
7602
7603 prev_insn_cc0 = this_insn_cc0;
7604 prev_insn_cc0_mode = this_insn_cc0_mode;
7605 #endif
7606
7607 prev_insn = insn;
7608 }
7609 \f
7610 /* Store 1 in *WRITES_PTR for those categories of memory ref
7611 that must be invalidated when the expression WRITTEN is stored in.
7612 If WRITTEN is null, say everything must be invalidated. */
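/* For instance (illustrative): a store through
(mem:SI (plus:SI (reg:SI 100) (const_int 4)))
with varying reg 100 sets ->var and ->nonscalar only, while a store
to a BLKmode aggregate sets everything, since its extent is unknown. */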
7613
7614 static void
7615 note_mem_written (written, writes_ptr)
7616 rtx written;
7617 struct write_data *writes_ptr;
7618 {
7619 static struct write_data everything = {0, 1, 1, 1};
7620
7621 if (written == 0)
7622 *writes_ptr = everything;
7623 else if (GET_CODE (written) == MEM)
7624 {
7625 /* Pushing or popping the stack invalidates just the stack pointer. */
7626 rtx addr = XEXP (written, 0);
7627 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7628 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7629 && GET_CODE (XEXP (addr, 0)) == REG
7630 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7631 {
7632 writes_ptr->sp = 1;
7633 return;
7634 }
7635 else if (GET_MODE (written) == BLKmode)
7636 *writes_ptr = everything;
7637 else if (cse_rtx_addr_varies_p (written))
7638 {
7639 /* A varying address that is a sum indicates an array element,
7640 and that's just as good as a structure element
7641 in implying that we need not invalidate scalar variables.
7642 However, we must allow QImode aliasing of scalars, because the
7643 ANSI C standard allows character pointers to alias anything.
7644 We must also allow AND addresses, because they may generate
7645 accesses outside the object being referenced. This is used to
7646 generate aligned addresses from unaligned addresses, for instance,
7647 the alpha storeqi_unaligned pattern. */
7648 if (! ((MEM_IN_STRUCT_P (written)
7649 || GET_CODE (XEXP (written, 0)) == PLUS)
7650 && GET_MODE (written) != QImode
7651 && GET_CODE (XEXP (written, 0)) != AND))
7652 writes_ptr->all = 1;
7653 writes_ptr->nonscalar = 1;
7654 }
7655 writes_ptr->var = 1;
7656 }
7657 }
7658
7659 /* Perform invalidation on the basis of everything about an insn
7660 except for invalidating the actual places that are SET in it.
7661 This includes the places CLOBBERed, and anything that might
7662 alias with something that is SET or CLOBBERed.
7663
7664 W points to the writes_memory for this insn, a struct write_data
7665 saying which kinds of memory references must be invalidated.
7666 X is the pattern of the insn. */
7667
7668 static void
7669 invalidate_from_clobbers (w, x)
7670 struct write_data *w;
7671 rtx x;
7672 {
7673 /* If W->var is not set, W specifies no action.
7674 If W->all is set, this step gets all memory refs
7675 so they can be ignored in the rest of this function. */
7676 if (w->var)
7677 invalidate_memory (w);
7678
7679 if (w->sp)
7680 {
7681 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7682 reg_tick[STACK_POINTER_REGNUM]++;
7683
7684 /* This should be *very* rare. */
7685 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7686 invalidate (stack_pointer_rtx, VOIDmode);
7687 }
7688
7689 if (GET_CODE (x) == CLOBBER)
7690 {
7691 rtx ref = XEXP (x, 0);
7692
7693 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7694 || (GET_CODE (ref) == MEM && ! w->all))
7695 invalidate (ref, VOIDmode);
7696 else if (GET_CODE (ref) == STRICT_LOW_PART
7697 || GET_CODE (ref) == ZERO_EXTRACT)
7698 invalidate (XEXP (ref, 0), GET_MODE (ref));
7699 }
7700 else if (GET_CODE (x) == PARALLEL)
7701 {
7702 register int i;
7703 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7704 {
7705 register rtx y = XVECEXP (x, 0, i);
7706 if (GET_CODE (y) == CLOBBER)
7707 {
7708 rtx ref = XEXP (y, 0);
7709 if (ref)
7710 {
7711 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7712 || (GET_CODE (ref) == MEM && !w->all))
7713 invalidate (ref, VOIDmode);
7714 else if (GET_CODE (ref) == STRICT_LOW_PART
7715 || GET_CODE (ref) == ZERO_EXTRACT)
7716 invalidate (XEXP (ref, 0), GET_MODE (ref));
7717 }
7718 }
7719 }
7720 }
7721 }
7722 \f
7723 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7724 and replace any registers in them with either an equivalent constant
7725 or the canonical form of the register. If we are inside an address,
7726 only do this if the address remains valid.
7727
7728 OBJECT is 0 except when within a MEM in which case it is the MEM.
7729
7730 Return the replacement for X. */
7731
7732 static rtx
7733 cse_process_notes (x, object)
7734 rtx x;
7735 rtx object;
7736 {
7737 enum rtx_code code = GET_CODE (x);
7738 char *fmt = GET_RTX_FORMAT (code);
7739 int i;
7740
7741 switch (code)
7742 {
7743 case CONST_INT:
7744 case CONST:
7745 case SYMBOL_REF:
7746 case LABEL_REF:
7747 case CONST_DOUBLE:
7748 case PC:
7749 case CC0:
7750 case LO_SUM:
7751 return x;
7752
7753 case MEM:
7754 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7755 return x;
7756
7757 case EXPR_LIST:
7758 case INSN_LIST:
7759 if (REG_NOTE_KIND (x) == REG_EQUAL)
7760 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7761 if (XEXP (x, 1))
7762 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7763 return x;
7764
7765 case SIGN_EXTEND:
7766 case ZERO_EXTEND:
7767 case SUBREG:
7768 {
7769 rtx new = cse_process_notes (XEXP (x, 0), object);
7770 /* We don't substitute VOIDmode constants into these rtx,
7771 since they would impede folding. */
7772 if (GET_MODE (new) != VOIDmode)
7773 validate_change (object, &XEXP (x, 0), new, 0);
7774 return x;
7775 }
7776
7777 case REG:
7778 i = reg_qty[REGNO (x)];
7779
7780 /* Return a constant or a constant register. */
7781 if (REGNO_QTY_VALID_P (REGNO (x))
7782 && qty_const[i] != 0
7783 && (CONSTANT_P (qty_const[i])
7784 || GET_CODE (qty_const[i]) == REG))
7785 {
7786 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7787 if (new)
7788 return new;
7789 }
7790
7791 /* Otherwise, canonicalize this register. */
7792 return canon_reg (x, NULL_RTX);
7793 }
7794
7795 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7796 if (fmt[i] == 'e')
7797 validate_change (object, &XEXP (x, i),
7798 cse_process_notes (XEXP (x, i), object), 0);
7799
7800 return x;
7801 }
7802 \f
7803 /* Find common subexpressions between the end test of a loop and the beginning
7804 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7805
7806 Often we have a loop where an expression in the exit test is used
7807 in the body of the loop. For example "while (*p) *q++ = *p++;".
7808 Because of the way we duplicate the loop exit test in front of the loop,
7809 however, we don't detect that common subexpression. This will be caught
7810 when global cse is implemented, but this is a quite common case.
7811
7812 This function handles the most common cases of these common expressions.
7813 It is called after we have processed the basic block ending with the
7814 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7815 jumps to a label used only once. */
7816
7817 static void
7818 cse_around_loop (loop_start)
7819 rtx loop_start;
7820 {
7821 rtx insn;
7822 int i;
7823 struct table_elt *p;
7824
7825 /* If the jump at the end of the loop doesn't go to the start, we don't
7826 do anything. */
7827 for (insn = PREV_INSN (loop_start);
7828 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7829 insn = PREV_INSN (insn))
7830 ;
7831
7832 if (insn == 0
7833 || GET_CODE (insn) != NOTE
7834 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7835 return;
7836
7837 /* If the last insn of the loop (the end test) was an NE comparison,
7838 we will interpret it as an EQ comparison, since we fell through
7839 the loop. Any equivalences resulting from that comparison are
7840 therefore not valid and must be invalidated. */
7841 if (last_jump_equiv_class)
7842 for (p = last_jump_equiv_class->first_same_value; p;
7843 p = p->next_same_value)
7844 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7845 || (GET_CODE (p->exp) == SUBREG
7846 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7847 invalidate (p->exp, VOIDmode);
7848 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7849 || GET_CODE (p->exp) == ZERO_EXTRACT)
7850 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7851
7852 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7853 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7854
7855 The only thing we do with SET_DEST is invalidate entries, so we
7856 can safely process each SET in order. It is slightly less efficient
7857 to do so, but we only want to handle the most common cases. */
7858
7859 for (insn = NEXT_INSN (loop_start);
7860 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7861 && ! (GET_CODE (insn) == NOTE
7862 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7863 insn = NEXT_INSN (insn))
7864 {
7865 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7866 && (GET_CODE (PATTERN (insn)) == SET
7867 || GET_CODE (PATTERN (insn)) == CLOBBER))
7868 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7869 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7870 && GET_CODE (PATTERN (insn)) == PARALLEL)
7871 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7872 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7873 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7874 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7875 loop_start);
7876 }
7877 }
7878 \f
7879 /* Variable used for communication between the next two routines. */
7880
7881 static struct write_data skipped_writes_memory;
7882
7883 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7884 since they are done elsewhere. This function is called via note_stores. */
7885
7886 static void
7887 invalidate_skipped_set (dest, set)
7888 rtx dest;
7889 rtx set;
7890 {
7891 if (GET_CODE (dest) == MEM)
7892 note_mem_written (dest, &skipped_writes_memory);
7893
7894 /* There are times when an address can appear varying and be a PLUS
7895 during this scan when it would be a fixed address were we to know
7896 the proper equivalences. So promote "nonscalar" to be "all". */
7897 if (skipped_writes_memory.nonscalar)
7898 skipped_writes_memory.all = 1;
7899
7900 if (GET_CODE (set) == CLOBBER
7901 #ifdef HAVE_cc0
7902 || dest == cc0_rtx
7903 #endif
7904 || dest == pc_rtx)
7905 return;
7906
7907 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7908 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7909 invalidate (dest, VOIDmode);
7910 else if (GET_CODE (dest) == STRICT_LOW_PART
7911 || GET_CODE (dest) == ZERO_EXTRACT)
7912 invalidate (XEXP (dest, 0), GET_MODE (dest));
7913 }
7914
7915 /* Invalidate all insns from START up to the end of the function or the
7916 next label. This is called when we wish to CSE around a block that is
7917 conditionally executed. */
7918
7919 static void
7920 invalidate_skipped_block (start)
7921 rtx start;
7922 {
7923 rtx insn;
7924 static struct write_data init = {0, 0, 0, 0};
7925 static struct write_data everything = {0, 1, 1, 1};
7926
7927 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7928 insn = NEXT_INSN (insn))
7929 {
7930 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7931 continue;
7932
7933 skipped_writes_memory = init;
7934
7935 if (GET_CODE (insn) == CALL_INSN)
7936 {
7937 invalidate_for_call ();
7938 skipped_writes_memory = everything;
7939 }
7940
7941 note_stores (PATTERN (insn), invalidate_skipped_set);
7942 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7943 }
7944 }
7945 \f
7946 /* Used for communication between the following two routines; contains a
7947 value to be checked for modification. */
7948
7949 static rtx cse_check_loop_start_value;
7950
7951 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7952 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
7953
7954 static void
7955 cse_check_loop_start (x, set)
7956 rtx x;
7957 rtx set;
7958 {
7959 if (cse_check_loop_start_value == 0
7960 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
7961 return;
7962
7963 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7964 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7965 cse_check_loop_start_value = 0;
7966 }
7967
7968 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7969 a loop that starts with the label at LOOP_START.
7970
7971 If X is a SET, we see if its SET_SRC is currently in our hash table.
7972 If so, we see if it has a value equal to some register used only in the
7973 loop exit code (as marked by jump.c).
7974
7975 If those two conditions are true, we search backwards from the start of
7976 the loop to see if that same value was loaded into a register that still
7977 retains its value at the start of the loop.
7978
7979 If so, we insert an insn after the load to copy the destination of that
7980 load into the equivalent register and (try to) replace our SET_SRC with that
7981 register.
7982
7983 In any event, we invalidate whatever this SET or CLOBBER modifies. */
7984
7985 static void
7986 cse_set_around_loop (x, insn, loop_start)
7987 rtx x;
7988 rtx insn;
7989 rtx loop_start;
7990 {
7991 struct table_elt *src_elt;
7992 static struct write_data init = {0, 0, 0, 0};
7993 struct write_data writes_memory;
7994
7995 writes_memory = init;
7996
7997 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7998 are setting PC or CC0 or whose SET_SRC is already a register. */
7999 if (GET_CODE (x) == SET
8000 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
8001 && GET_CODE (SET_SRC (x)) != REG)
8002 {
8003 src_elt = lookup (SET_SRC (x),
8004 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
8005 GET_MODE (SET_DEST (x)));
8006
8007 if (src_elt)
8008 for (src_elt = src_elt->first_same_value; src_elt;
8009 src_elt = src_elt->next_same_value)
8010 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
8011 && COST (src_elt->exp) < COST (SET_SRC (x)))
8012 {
8013 rtx p, set;
8014
8015 /* Look for an insn in front of LOOP_START that sets
8016 something in the desired mode to SET_SRC (x) before we hit
8017 a label or CALL_INSN. */
8018
8019 for (p = prev_nonnote_insn (loop_start);
8020 p && GET_CODE (p) != CALL_INSN
8021 && GET_CODE (p) != CODE_LABEL;
8022 p = prev_nonnote_insn (p))
8023 if ((set = single_set (p)) != 0
8024 && GET_CODE (SET_DEST (set)) == REG
8025 && GET_MODE (SET_DEST (set)) == src_elt->mode
8026 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
8027 {
8028 /* We now have to ensure that nothing between P
8029 and LOOP_START modified anything referenced in
8030 SET_SRC (x). We know that nothing within the loop
8031 can modify it, or we would have invalidated it in
8032 the hash table. */
8033 rtx q;
8034
8035 cse_check_loop_start_value = SET_SRC (x);
8036 for (q = p; q != loop_start; q = NEXT_INSN (q))
8037 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
8038 note_stores (PATTERN (q), cse_check_loop_start);
8039
8040 /* If nothing was changed and we can replace our
8041 SET_SRC, add an insn after P to copy its destination
8042 to what we will be replacing SET_SRC with. */
8043 if (cse_check_loop_start_value
8044 && validate_change (insn, &SET_SRC (x),
8045 src_elt->exp, 0))
8046 emit_insn_after (gen_move_insn (src_elt->exp,
8047 SET_DEST (set)),
8048 p);
8049 break;
8050 }
8051 }
8052 }
8053
8054 /* Now invalidate anything modified by X. */
8055 note_mem_written (SET_DEST (x), &writes_memory);
8056
8057 if (writes_memory.var)
8058 invalidate_memory (&writes_memory);
8059
8060 /* See comment on similar code in cse_insn for explanation of these
8061 tests. */
8062 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
8063 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
8064 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
8065 invalidate (SET_DEST (x), VOIDmode);
8066 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
8067 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
8068 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
8069 }
8070 \f
8071 /* Find the end of INSN's basic block and return its range,
8072 the total number of SETs in all the insns of the block, the last insn of the
8073 block, and the branch path.
8074
8075 The branch path indicates which branches should be followed. If a non-zero
8076 path size is specified, the block should be rescanned and a different set
8077 of branches will be taken. The branch path is only used if
8078 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
8079
8080 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
8081 used to describe the block. It is filled in with the information about
8082 the current block. The incoming structure's branch path, if any, is used
8083 to construct the output branch path. */
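/* For example (invented uids): a recorded path of
{ {branch = insn 40, status = TAKEN}, {branch = insn 55, status = AROUND} }
tells the rescan to follow the jump at insn 40 and to skip, but
invalidate, the block branched around at insn 55. */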
8084
8085 void
8086 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
8087 rtx insn;
8088 struct cse_basic_block_data *data;
8089 int follow_jumps;
8090 int after_loop;
8091 int skip_blocks;
8092 {
8093 rtx p = insn, q;
8094 int nsets = 0;
8095 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8096 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8097 int path_size = data->path_size;
8098 int path_entry = 0;
8099 int i;
8100
8101 /* Update the previous branch path, if any. If the last branch was
8102 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8103 shorten the path by one and look at the previous branch. We know that
8104 at least one branch must have been taken if PATH_SIZE is non-zero. */
8105 while (path_size > 0)
8106 {
8107 if (data->path[path_size - 1].status != NOT_TAKEN)
8108 {
8109 data->path[path_size - 1].status = NOT_TAKEN;
8110 break;
8111 }
8112 else
8113 path_size--;
8114 }
8115
8116 /* Scan to end of this basic block. */
8117 while (p && GET_CODE (p) != CODE_LABEL)
8118 {
8119 /* Don't cse out the end of a loop. This makes a difference
8120 only for the unusual loops that always execute at least once;
8121 all other loops have labels there so we will stop in any case.
8122 Cse'ing out the end of the loop is dangerous because it
8123 might cause an invariant expression inside the loop
8124 to be reused after the end of the loop. This would make it
8125 hard to move the expression out of the loop in loop.c,
8126 especially if it is one of several equivalent expressions
8127 and loop.c would like to eliminate it.
8128
8129 If we are running after loop.c has finished, we can ignore
8130 the NOTE_INSN_LOOP_END. */
8131
8132 if (! after_loop && GET_CODE (p) == NOTE
8133 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8134 break;
8135
8136 /* Don't cse over a call to setjmp; on some machines (e.g. the VAX)
8137 the regs restored by the longjmp come from
8138 a later time than the setjmp. */
8139 if (GET_CODE (p) == NOTE
8140 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8141 break;
8142
8143 /* A PARALLEL can have lots of SETs in it,
8144 especially if it is really an ASM_OPERANDS. */
8145 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8146 && GET_CODE (PATTERN (p)) == PARALLEL)
8147 nsets += XVECLEN (PATTERN (p), 0);
8148 else if (GET_CODE (p) != NOTE)
8149 nsets += 1;
8150
8151 /* Ignore insns made by CSE; they cannot affect the boundaries of
8152 the basic block. */
8153
8154 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8155 high_cuid = INSN_CUID (p);
8156 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8157 low_cuid = INSN_CUID (p);
8158
8159 /* See if this insn is in our branch path. If it is and we are to
8160 take it, do so. */
8161 if (path_entry < path_size && data->path[path_entry].branch == p)
8162 {
8163 if (data->path[path_entry].status != NOT_TAKEN)
8164 p = JUMP_LABEL (p);
8165
8166 /* Point to next entry in path, if any. */
8167 path_entry++;
8168 }
8169
8170 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8171 was specified, we haven't reached our maximum path length, there are
8172 insns following the target of the jump, this is the only use of the
8173 jump label, and the target label is preceded by a BARRIER.
8174
8175 Alternatively, we can follow the jump if it branches around a
8176 block of code and there are no other branches into the block.
8177 In this case invalidate_skipped_block will be called to invalidate any
8178 registers set in the block when following the jump. */
8179
8180 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8181 && GET_CODE (p) == JUMP_INSN
8182 && GET_CODE (PATTERN (p)) == SET
8183 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8184 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8185 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8186 {
8187 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8188 if ((GET_CODE (q) != NOTE
8189 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8190 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8191 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8192 break;
8193
8194 /* If we ran into a BARRIER, this code is an extension of the
8195 basic block when the branch is taken. */
8196 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8197 {
8198 /* Don't allow ourselves to keep walking around an
8199 always-executed loop. */
8200 if (next_real_insn (q) == next)
8201 {
8202 p = NEXT_INSN (p);
8203 continue;
8204 }
8205
8206 /* Similarly, don't put a branch in our path more than once. */
8207 for (i = 0; i < path_entry; i++)
8208 if (data->path[i].branch == p)
8209 break;
8210
8211 if (i != path_entry)
8212 break;
8213
8214 data->path[path_entry].branch = p;
8215 data->path[path_entry++].status = TAKEN;
8216
8217 /* This branch now ends our path. It was possible that we
8218 didn't see this branch the last time around (when the
8219 insn in front of the target was a JUMP_INSN that was
8220 turned into a no-op). */
8221 path_size = path_entry;
8222
8223 p = JUMP_LABEL (p);
8224 /* Mark block so we won't scan it again later. */
8225 PUT_MODE (NEXT_INSN (p), QImode);
8226 }
8227 /* Detect a branch around a block of code. */
8228 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8229 {
8230 register rtx tmp;
8231
8232 if (next_real_insn (q) == next)
8233 {
8234 p = NEXT_INSN (p);
8235 continue;
8236 }
8237
8238 for (i = 0; i < path_entry; i++)
8239 if (data->path[i].branch == p)
8240 break;
8241
8242 if (i != path_entry)
8243 break;
8244
8245 /* This is no_labels_between_p (p, q) with an added check for
8246 reaching the end of a function (in case Q precedes P). */
8247 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8248 if (GET_CODE (tmp) == CODE_LABEL)
8249 break;
8250
8251 if (tmp == q)
8252 {
8253 data->path[path_entry].branch = p;
8254 data->path[path_entry++].status = AROUND;
8255
8256 path_size = path_entry;
8257
8258 p = JUMP_LABEL (p);
8259 /* Mark block so we won't scan it again later. */
8260 PUT_MODE (NEXT_INSN (p), QImode);
8261 }
8262 }
8263 }
8264 p = NEXT_INSN (p);
8265 }
8266
8267 data->low_cuid = low_cuid;
8268 data->high_cuid = high_cuid;
8269 data->nsets = nsets;
8270 data->last = p;
8271
8272 /* If all jumps in the path are not taken, set our path length to zero
8273 so a rescan won't be done. */
8274 for (i = path_size - 1; i >= 0; i--)
8275 if (data->path[i].status != NOT_TAKEN)
8276 break;
8277
8278 if (i == -1)
8279 data->path_size = 0;
8280 else
8281 data->path_size = path_size;
8282
8283 /* End the current branch path. */
8284 data->path[path_size].branch = 0;
8285 }
8286 \f
8287 /* Perform cse on the instructions of a function.
8288 F is the first instruction.
8289 NREGS is one plus the highest pseudo-reg number used in the function.
8290
8291 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8292 (only if -frerun-cse-after-loop).
8293
8294 Returns 1 if jump_optimize should be redone due to simplifications
8295 in conditional jump instructions. */
8296
8297 int
8298 cse_main (f, nregs, after_loop, file)
8299 rtx f;
8300 int nregs;
8301 int after_loop;
8302 FILE *file;
8303 {
8304 struct cse_basic_block_data val;
8305 register rtx insn = f;
8306 register int i;
8307
8308 cse_jumps_altered = 0;
8309 recorded_label_ref = 0;
8310 constant_pool_entries_cost = 0;
8311 val.path_size = 0;
8312
8313 init_recog ();
8314
8315 max_reg = nregs;
8316
8317 all_minus_one = (int *) alloca (nregs * sizeof (int));
8318 consec_ints = (int *) alloca (nregs * sizeof (int));
8319
8320 for (i = 0; i < nregs; i++)
8321 {
8322 all_minus_one[i] = -1;
8323 consec_ints[i] = i;
8324 }
8325
8326 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8327 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8328 reg_qty = (int *) alloca (nregs * sizeof (int));
8329 reg_in_table = (int *) alloca (nregs * sizeof (int));
8330 reg_tick = (int *) alloca (nregs * sizeof (int));
8331
8332 #ifdef LOAD_EXTEND_OP
8333
8334 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8335 and change the code and mode as appropriate. */
8336 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
8337 #endif
8338
8339 /* Discard all the free elements of the previous function
8340 since they are allocated on the temporary obstack. */
8341 bzero ((char *) table, sizeof table);
8342 free_element_chain = 0;
8343 n_elements_made = 0;
8344
8345 /* Find the largest uid. */
8346
8347 max_uid = get_max_uid ();
8348 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8349 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8350
8351 /* Compute the mapping from uids to cuids.
8352 CUIDs are numbers assigned to insns, like uids,
8353 except that cuids increase monotonically through the code.
8354 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8355 between two insns is not affected by -g. */
8356
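/* e.g. (made-up uids): insns with uids 12, 3 and 47, in textual order,
get cuids 1, 2 and 3; a line-number note between them reuses the
preceding insn's cuid, so cuid distances are the same with or
without -g. */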
8357 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8358 {
8359 if (GET_CODE (insn) != NOTE
8360 || NOTE_LINE_NUMBER (insn) < 0)
8361 INSN_CUID (insn) = ++i;
8362 else
8363 /* Give a line number note the same cuid as preceding insn. */
8364 INSN_CUID (insn) = i;
8365 }
8366
8367 /* Initialize which registers are clobbered by calls. */
8368
8369 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8370
8371 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8372 if ((call_used_regs[i]
8373 /* Used to check !fixed_regs[i] here, but that isn't safe;
8374 fixed regs are still call-clobbered, and sched can get
8375 confused if they can "live across calls".
8376
8377 The frame pointer is always preserved across calls. The arg
8378 pointer is if it is fixed. The stack pointer usually is, unless
8379 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8380 will be present. If we are generating PIC code, the PIC offset
8381 table register is preserved across calls. */
8382
8383 && i != STACK_POINTER_REGNUM
8384 && i != FRAME_POINTER_REGNUM
8385 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8386 && i != HARD_FRAME_POINTER_REGNUM
8387 #endif
8388 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8389 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8390 #endif
8391 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8392 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8393 #endif
8394 )
8395 || global_regs[i])
8396 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8397
8398 /* Loop over basic blocks.
8399 Compute the maximum number of qty's needed for each basic block
8400 (which is 2 for each SET). */
8401 insn = f;
8402 while (insn)
8403 {
8404 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8405 flag_cse_skip_blocks);
8406
8407 /* If this basic block was already processed or has no sets, skip it. */
8408 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8409 {
8410 PUT_MODE (insn, VOIDmode);
8411 insn = (val.last ? NEXT_INSN (val.last) : 0);
8412 val.path_size = 0;
8413 continue;
8414 }
8415
8416 cse_basic_block_start = val.low_cuid;
8417 cse_basic_block_end = val.high_cuid;
8418 max_qty = val.nsets * 2;
8419
8420 if (file)
8421 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8422 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8423 val.nsets);
8424
8425 /* Make MAX_QTY bigger to give us room to optimize
8426 past the end of this basic block, if that should prove useful. */
8427 if (max_qty < 500)
8428 max_qty = 500;
8429
8430 max_qty += max_reg;
8431
8432 /* If this basic block is being extended by following certain jumps,
8433 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8434 Otherwise, we start after this basic block. */
8435 if (val.path_size > 0)
8436 cse_basic_block (insn, val.last, val.path, 0);
8437 else
8438 {
8439 int old_cse_jumps_altered = cse_jumps_altered;
8440 rtx temp;
8441
8442 /* When cse changes a conditional jump to an unconditional
8443 jump, we want to reprocess the block, since it will give
8444 us a new branch path to investigate. */
8445 cse_jumps_altered = 0;
8446 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8447 if (cse_jumps_altered == 0
8448 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8449 insn = temp;
8450
8451 cse_jumps_altered |= old_cse_jumps_altered;
8452 }
8453
8454 #ifdef USE_C_ALLOCA
8455 alloca (0);
8456 #endif
8457 }
8458
8459 /* Tell refers_to_mem_p that qty_const info is not available. */
8460 qty_const = 0;
8461
8462 if (max_elements_made < n_elements_made)
8463 max_elements_made = n_elements_made;
8464
8465 return cse_jumps_altered || recorded_label_ref;
8466 }
8467
8468 /* Process a single basic block. FROM and TO are the limits of the basic
8469 block. NEXT_BRANCH points to the branch path when following jumps or
8470 a null path when not following jumps.
8471
8472 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8473 loop. This is true when we are being called for the last time on a
8474 block and this CSE pass is before loop.c. */
8475
8476 static rtx
8477 cse_basic_block (from, to, next_branch, around_loop)
8478 register rtx from, to;
8479 struct branch_path *next_branch;
8480 int around_loop;
8481 {
8482 register rtx insn;
8483 int to_usage = 0;
8484 int in_libcall_block = 0;
8485
8486 /* Each of these arrays is undefined before max_reg, so only allocate
8487 the space actually needed and adjust the start below. */
8488
8489 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8490 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8491 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8492 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8493 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8494 qty_comparison_code
8495 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8496 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8497 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8498
8499 qty_first_reg -= max_reg;
8500 qty_last_reg -= max_reg;
8501 qty_mode -= max_reg;
8502 qty_const -= max_reg;
8503 qty_const_insn -= max_reg;
8504 qty_comparison_code -= max_reg;
8505 qty_comparison_qty -= max_reg;
8506 qty_comparison_const -= max_reg;
8507
8508 new_basic_block ();
8509
8510 /* TO might be a label. If so, protect it from being deleted. */
8511 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8512 ++LABEL_NUSES (to);
8513
8514 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8515 {
8516 register enum rtx_code code;
8517
8518 /* See if this is a branch that is part of the path. If so, and it is
8519 to be taken, do so. */
8520 if (next_branch->branch == insn)
8521 {
8522 enum taken status = next_branch++->status;
8523 if (status != NOT_TAKEN)
8524 {
8525 if (status == TAKEN)
8526 record_jump_equiv (insn, 1);
8527 else
8528 invalidate_skipped_block (NEXT_INSN (insn));
8529
8530 /* Set the last insn as the jump insn; it doesn't affect cc0.
8531 Then follow this branch. */
8532 #ifdef HAVE_cc0
8533 prev_insn_cc0 = 0;
8534 #endif
8535 prev_insn = insn;
8536 insn = JUMP_LABEL (insn);
8537 continue;
8538 }
8539 }
8540
8541 code = GET_CODE (insn);
8542 if (GET_MODE (insn) == QImode)
8543 PUT_MODE (insn, VOIDmode);
8544
8545 if (GET_RTX_CLASS (code) == 'i')
8546 {
8547 /* Process notes first so we have all notes in canonical forms when
8548 looking for duplicate operations. */
8549
8550 if (REG_NOTES (insn))
8551 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8552
8553 /* Track when we are inside a LIBCALL block. Inside such a block,
8554 we do not want to record destinations. The last insn of a
8555 LIBCALL block is not considered to be part of the block, since
8556 its destination is the result of the block and hence should be
8557 recorded. */
8558
8559 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8560 in_libcall_block = 1;
8561 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8562 in_libcall_block = 0;
8563
8564 cse_insn (insn, in_libcall_block);
8565 }
8566
8567 /* If INSN is now an unconditional jump, skip to the end of our
8568 basic block by pretending that we just did the last insn in the
8569 basic block. If we are jumping to the end of our block, show
8570 that we can have one usage of TO. */
8571
8572 if (simplejump_p (insn))
8573 {
8574 if (to == 0)
8575 return 0;
8576
8577 if (JUMP_LABEL (insn) == to)
8578 to_usage = 1;
8579
8580 /* Maybe TO was deleted because the jump is unconditional.
8581 If so, there is nothing left in this basic block. */
8582 /* ??? Perhaps it would be smarter to set TO
8583 to whatever follows this insn,
8584 and pretend the basic block had always ended here. */
8585 if (INSN_DELETED_P (to))
8586 break;
8587
8588 insn = PREV_INSN (to);
8589 }
8590
8591 /* See if it is ok to keep on going past the label
8592 which used to end our basic block. Remember that we incremented
8593 the count of that label, so we decrement it here. If we made
8594 a jump unconditional, TO_USAGE will be one; in that case, we don't
8595 want to count the use in that jump. */
8596
8597 if (to != 0 && NEXT_INSN (insn) == to
8598 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8599 {
8600 struct cse_basic_block_data val;
8601 rtx prev;
8602
8603 insn = NEXT_INSN (to);
8604
8605 if (LABEL_NUSES (to) == 0)
8606 insn = delete_insn (to);
8607
8608 /* If TO was the last insn in the function, we are done. */
8609 if (insn == 0)
8610 return 0;
8611
8612 /* If TO was preceded by a BARRIER we are done with this block
8613 because it has no continuation. */
8614 prev = prev_nonnote_insn (to);
8615 if (prev && GET_CODE (prev) == BARRIER)
8616 return insn;
8617
8618 /* Find the end of the following block. Note that we won't be
8619 following branches in this case. */
8620 to_usage = 0;
8621 val.path_size = 0;
8622 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8623
8624 /* If the tables we allocated have enough space left
8625 to handle all the SETs in the next basic block,
8626 continue through it. Otherwise, return,
8627 and that block will be scanned individually. */
8628 if (val.nsets * 2 + next_qty > max_qty)
8629 break;
8630
8631 cse_basic_block_start = val.low_cuid;
8632 cse_basic_block_end = val.high_cuid;
8633 to = val.last;
8634
8635 /* Prevent TO from being deleted if it is a label. */
8636 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8637 ++LABEL_NUSES (to);
8638
8639 /* Back up so we process the first insn in the extension. */
8640 insn = PREV_INSN (insn);
8641 }
8642 }
8643
8644 if (next_qty > max_qty)
8645 abort ();
8646
8647 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8648 the previous insn is the only insn that branches to the head of a loop,
8649 we can cse into the loop. Don't do this if we changed the jump
8650 structure of a loop unless we aren't going to be following jumps. */
8651
8652 if ((cse_jumps_altered == 0
8653 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8654 && around_loop && to != 0
8655 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8656 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8657 && JUMP_LABEL (PREV_INSN (to)) != 0
8658 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8659 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8660
8661 return to ? NEXT_INSN (to) : 0;
8662 }
8663 \f
8664 /* Count the number of times registers are used (not set) in X.
8665 COUNTS is an array in which we accumulate the count, INCR is how much
8666 we count each register usage.
8667
8668 Don't count a usage of DEST, which is the SET_DEST of a SET which
8669 contains X in its SET_SRC. This is because such a SET does not
8670 modify the liveness of DEST. */
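/* Thus for (set (reg 66) (plus (reg 66) (reg 67))) we count one use of
reg 67 and none of reg 66 (invented numbers); if nothing else reads
reg 66, the insn is deletable even though it mentions its own
destination. */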
8671
8672 static void
8673 count_reg_usage (x, counts, dest, incr)
8674 rtx x;
8675 int *counts;
8676 rtx dest;
8677 int incr;
8678 {
8679 enum rtx_code code;
8680 char *fmt;
8681 int i, j;
8682
8683 if (x == 0)
8684 return;
8685
8686 switch (code = GET_CODE (x))
8687 {
8688 case REG:
8689 if (x != dest)
8690 counts[REGNO (x)] += incr;
8691 return;
8692
8693 case PC:
8694 case CC0:
8695 case CONST:
8696 case CONST_INT:
8697 case CONST_DOUBLE:
8698 case SYMBOL_REF:
8699 case LABEL_REF:
8700 case CLOBBER:
8701 return;
8702
8703 case SET:
8704 /* Unless we are setting a REG, count everything in SET_DEST. */
8705 if (GET_CODE (SET_DEST (x)) != REG)
8706 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8707
8708 /* If SRC has side-effects, then we can't delete this insn, so the
8709 usage of SET_DEST inside SRC counts.
8710
8711 ??? Strictly speaking, we might be preserving this insn
8712 because some other SET has side-effects, but that's hard
8713 to do and can't happen now. */
      count_reg_usage (SET_SRC (x), counts,
		       side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
		       incr);
      return;

    case CALL_INSN:
      count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);

      /* ... falls through ...  */
    case INSN:
    case JUMP_INSN:
      count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);

      /* Things used in a REG_EQUAL note aren't dead, since the loop pass
	 may try to use them.  */

      count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
      return;

    case EXPR_LIST:
    case INSN_LIST:
      if (REG_NOTE_KIND (x) == REG_EQUAL
	  || GET_CODE (XEXP (x, 0)) == USE)
	count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
      count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
      return;
    }

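  /* For any other code, recursively count uses in every sub-expression,
     preserving DEST so that uses of the destination register anywhere
     within X remain uncounted.  */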
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	count_reg_usage (XEXP (x, i), counts, dest, incr);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
    }
}
\f
/* Scan all the insns and delete any that are dead; i.e., any that set a
   register that is never used or copy a register to itself.

   This is used to remove insns made obviously dead by cse.  It improves
   the heuristics in loop.c, since that pass won't try to move dead
   invariants out of loops or make givs for dead quantities.  The
   remaining passes of the compilation are also sped up.  */
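
/* For instance (an illustrative sketch): once cse has replaced every use
   of reg 70 with the equivalent reg 69, the copy (set (reg 70) (reg 69))
   sets a register whose use count is zero and is deleted here; deleting
   it in turn decrements the use count of reg 69, which may expose the
   insn that set reg 69 as dead later in the backward scan below.  */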

void
delete_dead_from_cse (insns, nreg)
     rtx insns;
     int nreg;
{
  int *counts = (int *) alloca (nreg * sizeof (int));
  rtx insn, prev;
  rtx tem;
  int i;
  int in_libcall = 0;

  /* First count the number of times each register is used.  */
  bzero ((char *) counts, sizeof (int) * nreg);
  for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
    count_reg_usage (insn, counts, NULL_RTX, 1);

  /* Go from the last insn to the first and delete insns that only set unused
     registers or copy a register to itself.  As we delete an insn, remove
     usage counts for registers it uses.  Scanning backwards means that
     deleting one insn can expose the insns that computed its inputs as
     dead in the same pass.  */
  for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
    {
      int live_insn = 0;

      prev = prev_real_insn (insn);

      /* Don't delete any insns that are part of a libcall block.
	 Flow or loop might get confused if we did that.  Remember
	 that we are scanning backwards.  */
      if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
	in_libcall = 1;

      if (in_libcall)
	live_insn = 1;
      else if (GET_CODE (PATTERN (insn)) == SET)
	{
	  if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
	      && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
	    ;

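	  /* On a cc0 machine, a set of the condition code is dead if its
	     value is never examined: either there is no following real
	     insn, or the next real insn does not mention cc0 (and the
	     source has no side effects).  */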
#ifdef HAVE_cc0
	  else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
		   && ! side_effects_p (SET_SRC (PATTERN (insn)))
		   && ((tem = next_nonnote_insn (insn)) == 0
		       || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
		       || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
	    ;
#endif
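	  /* Any other SET is live if it sets something besides a pseudo
	     register, sets a pseudo whose use count is nonzero, or has a
	     source with side effects.  Hard registers are conservatively
	     treated as live, since not all of their uses are necessarily
	     visible here.  */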
	  else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
		   || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
		   || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
		   || side_effects_p (SET_SRC (PATTERN (insn))))
	    live_insn = 1;
	}
      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	  {
	    rtx elt = XVECEXP (PATTERN (insn), 0, i);

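	    /* The tests below mirror the single-SET case above; USE and
	       CLOBBER elements never make the insn live by themselves.  */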
	    if (GET_CODE (elt) == SET)
	      {
		if (GET_CODE (SET_DEST (elt)) == REG
		    && SET_DEST (elt) == SET_SRC (elt))
		  ;

#ifdef HAVE_cc0
		else if (GET_CODE (SET_DEST (elt)) == CC0
			 && ! side_effects_p (SET_SRC (elt))
			 && ((tem = next_nonnote_insn (insn)) == 0
			     || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
			     || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
		  ;
#endif
		else if (GET_CODE (SET_DEST (elt)) != REG
			 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
			 || counts[REGNO (SET_DEST (elt))] != 0
			 || side_effects_p (SET_SRC (elt)))
		  live_insn = 1;
	      }
	    else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
	      live_insn = 1;
	  }
      else
	live_insn = 1;

      /* If this is a dead insn, delete it and decrement the usage counts
	 of the registers it uses.  */

      if (! live_insn)
	{
	  count_reg_usage (insn, counts, NULL_RTX, -1);
	  delete_insn (insn);
	}

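      /* INSN may have just been deleted above, but delete_insn only turns
	 an insn into a NOTE_INSN_DELETED note and leaves its REG_NOTES
	 intact, so looking for REG_LIBCALL here is still safe.  */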
      if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	in_libcall = 0;
    }
}