(insert): Set cse_jumps_altered when inserting a LABEL_REF.
gcc/cse.c
1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 #include "config.h"
23 /* Must precede rtl.h for FFS. */
24 #include <stdio.h>
25
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "flags.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "recog.h"
33
34 #include <setjmp.h>
35
36 /* The basic idea of common subexpression elimination is to go
37 through the code, keeping a record of expressions that would
38 have the same value at the current scan point, and replacing
39 expressions encountered with the cheapest equivalent expression.
40
41 It is too complicated to keep track of the different possibilities
42 when control paths merge; so, at each label, we forget all that is
43 known and start fresh. This can be described as processing each
44 basic block separately. Note, however, that these are not quite
45 the same as the basic blocks found by a later pass and used for
46 data flow analysis and register packing. We do not need to start fresh
47 after a conditional jump instruction if there is no label there.
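
   For example (an illustrative fragment, not from any real program):

	b = a + 2;
	if (cond) goto L;
	c = a + 2;	-- may reuse the value computed for b, since
			   no label intervenes after the jump
      L:
	d = a + 2;	-- L can be reached from more than one path,
			   so we must start fresh here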
48
49 We use two data structures to record the equivalent expressions:
50 a hash table for most expressions, and several vectors together
51 with "quantity numbers" to record equivalent (pseudo) registers.
52
53 The use of the special data structure for registers is desirable
54 because it is faster. It is possible because register references
55 contain a fairly small number, the register number, taken from
56 a contiguously allocated series, and two register references are
57 identical if they have the same number. General expressions
58 do not have any such thing, so the only way to retrieve the
59 information recorded on an expression other than a register
60 is to keep it in a hash table.
61
62 Registers and "quantity numbers":
63
64 At the start of each basic block, all of the (hardware and pseudo)
65 registers used in the function are given distinct quantity
66 numbers to indicate their contents. During scan, when the code
67 copies one register into another, we copy the quantity number.
68 When a register is loaded in any other way, we allocate a new
69 quantity number to describe the value generated by this operation.
70 `reg_qty' records what quantity a register is currently thought
71 of as containing.
72
73 All real quantity numbers are greater than or equal to `max_reg'.
74 If register N has not been assigned a quantity, reg_qty[N] will equal N.
75
76 Quantity numbers below `max_reg' do not exist and none of the `qty_...'
77 variables should be referenced with an index below `max_reg'.
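
   For example (illustrative numbers only): with max_reg == 100,
   scanning

	(set (reg 65) (reg 60))

   first gives reg 60 a fresh quantity, say 100, if it had none; the
   copy then makes reg_qty[65] == reg_qty[60] == 100.  A later

	(set (reg 60) (plus (reg 60) (const_int 4)))

   allocates a new quantity, say 101, for reg 60, while reg 65 keeps
   quantity 100 and thus the old value.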
78
79 We also maintain a bidirectional chain of registers for each
80 quantity number. `qty_first_reg', `qty_last_reg',
81 `reg_next_eqv' and `reg_prev_eqv' hold these chains.
82
83 The first register in a chain is the one whose lifespan is least local.
84 Among equals, it is the one that was seen first.
85 We replace any equivalent register with that one.
86
87 If two registers have the same quantity number, REG expressions with
88 the quantity's `qty_mode' must be in the hash table for both
89 registers and must be in the same class.
90
91 The converse is not true. Since hard registers may be referenced in
92 any mode, two REG expressions might be equivalent in the hash table
93 but not have the same quantity number if the mode of one register's
94 quantity is not the same as the mode of those expressions.
95
96 Constants and quantity numbers
97
98 When a quantity has a known constant value, that value is stored
99 in the appropriate element of qty_const. This is in addition to
100 putting the constant in the hash table as is usual for non-regs.
101
102 Whether a reg or a constant is preferred is determined by the configuration
103 macro CONST_COSTS and will often depend on the constant value. In any
104 event, expressions containing constants can be simplified by fold_rtx.
105
106 When a quantity has a known nearly constant value (such as an address
107 of a stack slot), that value is stored in the appropriate element
108 of qty_const.
109
110 Integer constants don't have a machine mode. However, cse
111 determines the intended machine mode from the destination
112 of the instruction that moves the constant. The machine mode
113 is recorded in the hash table along with the actual RTL
114 constant expression so that different modes are kept separate.
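
   For example (illustrative), after

	(set (reg:SI 70) (const_int 1))
	(set (reg:DI 71) (const_int 1))

   the single rtx (const_int 1) is entered twice, once recorded with
   SImode and once with DImode, and a lookup matches only the entry
   whose recorded mode agrees.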
115
116 Other expressions:
117
118 To record known equivalences among expressions in general
119 we use a hash table called `table'. It has a fixed number of buckets
120 that contain chains of `struct table_elt' elements for expressions.
121 These chains connect the elements whose expressions have the same
122 hash codes.
123
124 Other chains through the same elements connect the elements which
125 currently have equivalent values.
126
127 Register references in an expression are canonicalized before hashing
128 the expression. This is done using `reg_qty' and `qty_first_reg'.
129 The hash code of a register reference is computed using the quantity
130 number, not the register number.
131
132 When the value of an expression changes, it is necessary to remove from the
133 hash table not just that expression but all expressions whose values
134 could be different as a result.
135
136 1. If the value changing is in memory, except in special cases
137 ANYTHING referring to memory could be changed. That is because
138 nobody knows where a pointer does not point.
139 The function `invalidate_memory' removes what is necessary.
140
141 The special cases are when the address is constant or is
142 a constant plus a fixed register such as the frame pointer
143 or a static chain pointer. When such addresses are stored in,
144 we can tell exactly which other such addresses must be invalidated
145 due to overlap. `invalidate' does this.
146 All expressions that refer to non-constant
147 memory addresses are also invalidated. `invalidate_memory' does this.
148
149 2. If the value changing is a register, all expressions
150 containing references to that register, and only those,
151 must be removed.
152
153 Because searching the entire hash table for expressions that contain
154 a register is very slow, we try to figure out when it isn't necessary.
155 Precisely, this is necessary only when expressions have been
156 entered in the hash table using this register, and then the value has
157 changed, and then another expression wants to be added to refer to
158 the register's new value. This sequence of circumstances is rare
159 within any one basic block.
160
161 The vectors `reg_tick' and `reg_in_table' are used to detect this case.
162 reg_tick[i] is incremented whenever a value is stored in register i.
163 reg_in_table[i] holds -1 if no references to register i have been
164 entered in the table; otherwise, it contains the value reg_tick[i] had
165 when the references were entered. If we want to enter a reference
166 and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
167 Until we want to enter a new entry, the mere fact that the two vectors
168 don't match causes such entries to be ignored by any attempted match.
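
   For example (illustrative): if reg_tick[6] is 3 when an expression
   mentioning reg 6 is entered in the table, reg_in_table[6] becomes 3.
   A later store into reg 6 bumps reg_tick[6] to 4; from then on the
   stale entries are ignored by lookups, and they are actually removed
   only if some new expression mentioning reg 6 wants to be entered.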
169
170 Registers themselves are entered in the hash table as well as in
171 the equivalent-register chains. However, the vectors `reg_tick'
172 and `reg_in_table' do not apply to expressions which are simple
173 register references. These expressions are removed from the table
174 immediately when they become invalid, and this can be done even if
175 we do not immediately search for all the expressions that refer to
176 the register.
177
178 A CLOBBER rtx in an instruction invalidates its operand for further
179 reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
180 invalidates everything that resides in memory.
181
182 Related expressions:
183
184 Constant expressions that differ only by an additive integer
185 are called related. When a constant expression is put in
186 the table, the related expression with no constant term
187 is also entered. These are made to point at each other
188 so that it is possible to find out if there exists any
189 register equivalent to an expression related to a given expression. */
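
/* Below is a minimal, self-contained sketch of the quantity bookkeeping
   described above.  The `toy_' names are hypothetical and are not part
   of this file, and the sketch omits the canonical-ordering rules that
   make_regs_eqv applies.  It is guarded by #if 0 so it is never built.  */
#if 0
#define TOY_NREGS 8		/* stands in for max_reg */

static int toy_reg_qty[TOY_NREGS];	 /* quantity held by each register */
static int toy_next_eqv[TOY_NREGS];	 /* next reg with same qty, or -1 */
static int toy_qty_first[TOY_NREGS * 2]; /* first reg in each qty's chain */
static int toy_next_qty;

/* Start a new basic block: forget everything.  */

static void
toy_new_block ()
{
  register int i;

  toy_next_qty = TOY_NREGS;	/* real quantities start at "max_reg" */
  for (i = 0; i < TOY_NREGS; i++)
    toy_reg_qty[i] = i;		/* reg_qty[N] == N means no quantity yet */
}

/* Register REG is loaded in some new way: allocate a fresh quantity.  */

static void
toy_load (reg)
     int reg;
{
  register int q;

  if (toy_next_qty >= TOY_NREGS * 2)	/* cf. the check in make_new_qty */
    abort ();

  q = toy_reg_qty[reg] = toy_next_qty++;
  toy_qty_first[q] = reg;
  toy_next_eqv[reg] = -1;
}

/* A copy DST = SRC: DST takes on SRC's quantity.  SRC must already
   have a quantity (call toy_load on it first if it does not).  */

static void
toy_copy (dst, src)
     int dst, src;
{
  register int q = toy_reg_qty[src];

  toy_reg_qty[dst] = q;
  toy_next_eqv[dst] = toy_qty_first[q];	/* link DST at the chain head */
  toy_qty_first[q] = dst;
}
#endif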
190
191 /* One plus largest register number used in this function. */
192
193 static int max_reg;
194
195 /* Length of vectors indexed by quantity number.
196 We know in advance we will not need a quantity number this big. */
197
198 static int max_qty;
199
200 /* Next quantity number to be allocated.
201 This is 1 + the largest number needed so far. */
202
203 static int next_qty;
204
205 /* Indexed by quantity number, gives the first (or last) (pseudo) register
206 in the chain of registers that currently contain this quantity. */
207
208 static int *qty_first_reg;
209 static int *qty_last_reg;
210
211 /* Indexed by quantity number, gives the mode of the quantity. */
212
213 static enum machine_mode *qty_mode;
214
215 /* Indexed by quantity number, gives the rtx of the constant value of the
216 quantity, or zero if it does not have a known value.
217 A sum of the frame pointer (or arg pointer) plus a constant
218 can also be entered here. */
219
220 static rtx *qty_const;
221
222 /* Indexed by qty number, gives the insn that stored the constant value
223 recorded in `qty_const'. */
224
225 static rtx *qty_const_insn;
226
227 /* The next three variables are used to track when a comparison between a
228 quantity and some constant or register has been passed. In that case, we
229 know the results of the comparison in case we see it again. These variables
230 record a comparison that is known to be true. */
231
232 /* Indexed by qty number, gives the rtx code of a comparison with a known
233 result involving this quantity. If none, it is UNKNOWN. */
234 static enum rtx_code *qty_comparison_code;
235
236 /* Indexed by qty number, gives the constant being compared against in a
237 comparison of known result. If no such comparison, it is undefined.
238 If the comparison is not with a constant, it is zero. */
239
240 static rtx *qty_comparison_const;
241
242 /* Indexed by qty number, gives the quantity being compared against in a
243 comparison of known result. If no such comparison, it is undefined.
244 If the comparison is not with a register, it is -1. */
245
246 static int *qty_comparison_qty;
247
248 #ifdef HAVE_cc0
249 /* For machines that have a CC0, we do not record its value in the hash
250 table since its use is guaranteed to be the insn immediately following
251 its definition and any other insn is presumed to invalidate it.
252
253 Instead, we store below the value last assigned to CC0. If it should
254 happen to be a constant, it is stored in preference to the actual
255 assigned value. In case it is a constant, we store the mode in which
256 the constant should be interpreted. */
257
258 static rtx prev_insn_cc0;
259 static enum machine_mode prev_insn_cc0_mode;
260 #endif
261
262 /* Previous actual insn. 0 if at first insn of basic block. */
263
264 static rtx prev_insn;
265
266 /* Insn being scanned. */
267
268 static rtx this_insn;
269
270 /* Indexed by (pseudo) register number, gives the quantity number
271 of the register's current contents. */
272
273 static int *reg_qty;
274
275 /* Indexed by (pseudo) register number, gives the number of the next (or
276 previous) (pseudo) register in the chain of registers sharing the same
277 value.
278
279 Or -1 if this register is at the end of the chain.
280
281 If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
282
283 static int *reg_next_eqv;
284 static int *reg_prev_eqv;
285
286 /* Indexed by (pseudo) register number, gives the number of times
287 that register has been altered in the current basic block. */
288
289 static int *reg_tick;
290
291 /* Indexed by (pseudo) register number, gives the reg_tick value at which
292 rtx's containing this register are valid in the hash table.
293 If this does not equal the current reg_tick value, such expressions
294 existing in the hash table are invalid.
295 If this is -1, no expressions containing this register have been
296 entered in the table. */
297
298 static int *reg_in_table;
299
300 /* A HARD_REG_SET containing all the hard registers for which there is
301 currently a REG expression in the hash table. Note the difference
302 from the above variables, which indicate if the REG is mentioned in some
303 expression in the table. */
304
305 static HARD_REG_SET hard_regs_in_table;
306
307 /* A HARD_REG_SET containing all the hard registers that are invalidated
308 by a CALL_INSN. */
309
310 static HARD_REG_SET regs_invalidated_by_call;
311
312 /* Two vectors of ints:
313 one containing max_reg elements, each -1; the other containing
314 max_reg + 500 (an approximation of max_qty) elements, where element i is i.
315 These are used to initialize various other vectors fast. */
316
317 static int *all_minus_one;
318 static int *consec_ints;
319
320 /* CUID of insn that starts the basic block currently being cse-processed. */
321
322 static int cse_basic_block_start;
323
324 /* CUID of insn that ends the basic block currently being cse-processed. */
325
326 static int cse_basic_block_end;
327
328 /* Vector mapping INSN_UIDs to cuids.
329 The cuids are like uids but always increase monotonically.
330 We use them to see whether a reg is used outside a given basic block. */
331
332 static int *uid_cuid;
333
334 /* Highest UID in UID_CUID. */
335 static int max_uid;
336
337 /* Get the cuid of an insn. */
338
339 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
340
341 /* Nonzero if cse has altered conditional jump insns
342 in such a way that jump optimization should be redone. */
343
344 static int cse_jumps_altered;
345
346 /* canon_hash stores 1 in do_not_record
347 if it notices a reference to CC0, PC, or some other volatile
348 subexpression. */
349
350 static int do_not_record;
351
352 #ifdef LOAD_EXTEND_OP
353
354 /* Scratch rtl used when looking for a load-extended copy of a MEM. */
355 static rtx memory_extend_rtx;
356 #endif
357
358 /* canon_hash stores 1 in hash_arg_in_memory
359 if it notices a reference to memory within the expression being hashed. */
360
361 static int hash_arg_in_memory;
362
363 /* canon_hash stores 1 in hash_arg_in_struct
364 if it notices a reference to memory that's part of a structure. */
365
366 static int hash_arg_in_struct;
367
368 /* The hash table contains buckets which are chains of `struct table_elt's,
369 each recording one expression's information.
370 That expression is in the `exp' field.
371
372 Those elements with the same hash code are chained in both directions
373 through the `next_same_hash' and `prev_same_hash' fields.
374
375 Each set of expressions with equivalent values
376 are on a two-way chain through the `next_same_value'
377 and `prev_same_value' fields, and all point with
378 the `first_same_value' field at the first element in
379 that chain. The chain is in order of increasing cost.
380 Each element's cost value is in its `cost' field.
381
382 The `in_memory' field is nonzero for elements that
383 involve any reference to memory. These elements are removed
384 whenever a write is done to an unidentified location in memory.
385 To be safe, we assume that a memory address is unidentified unless
386 the address is either a symbol constant or a constant plus
387 the frame pointer or argument pointer.
388
389 The `in_struct' field is nonzero for elements that
390 involve any reference to memory inside a structure or array.
391
392 The `related_value' field is used to connect related expressions
393 (that differ by adding an integer).
394 The related expressions are chained in a circular fashion.
395 `related_value' is zero for expressions for which this
396 chain is not useful.
397
398 The `cost' field stores the cost of this element's expression.
399
400 The `is_const' flag is set if the element is a constant (including
401 a fixed address).
402
403 The `flag' field is used as a temporary during some search routines.
404
405 The `mode' field is usually the same as GET_MODE (`exp'), but
406 if `exp' is a CONST_INT and has no machine mode then the `mode'
407 field is the mode it was being used as. Each constant is
408 recorded separately for each mode it is used with. */
409
410
411 struct table_elt
412 {
413 rtx exp;
414 struct table_elt *next_same_hash;
415 struct table_elt *prev_same_hash;
416 struct table_elt *next_same_value;
417 struct table_elt *prev_same_value;
418 struct table_elt *first_same_value;
419 struct table_elt *related_value;
420 int cost;
421 enum machine_mode mode;
422 char in_memory;
423 char in_struct;
424 char is_const;
425 char flag;
426 };
427
428 /* We don't want a lot of buckets, because we rarely have very many
429 things stored in the hash table, and a lot of buckets slows
430 down a lot of loops that happen frequently. */
431 #define NBUCKETS 31
432
433 /* Compute hash code of X in mode M. Special-case the case where X is a pseudo
434 register (hard registers may require `do_not_record' to be set). */
435
436 #define HASH(X, M) \
437 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
438 ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
439 : canon_hash (X, M) % NBUCKETS)
440
441 /* Determine whether register number N is considered a fixed register for CSE.
442 It is desirable to replace other regs with fixed regs, to reduce need for
443 non-fixed hard regs.
444 A reg wins if it is either the frame pointer or designated as fixed,
445 but not if it is an overlapping register. */
446 #ifdef OVERLAPPING_REGNO_P
447 #define FIXED_REGNO_P(N) \
448 (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
449 || fixed_regs[N] || global_regs[N]) \
450 && ! OVERLAPPING_REGNO_P ((N)))
451 #else
452 #define FIXED_REGNO_P(N) \
453 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
454 || fixed_regs[N] || global_regs[N])
455 #endif
456
457 /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
458 hard registers and pointers into the frame are the cheapest with a cost
459 of 0. Next come pseudos with a cost of one and other hard registers with
460 a cost of 2. Aside from these special cases, call `rtx_cost'. */
461
462 #define CHEAP_REGNO(N) \
463 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
464 || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
465 || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
466 || ((N) < FIRST_PSEUDO_REGISTER \
467 && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
468
469 /* A register is cheap if it is a user variable assigned to the register
470 or if its register number always corresponds to a cheap register. */
471
472 #define CHEAP_REG(N) \
473 ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
474 || CHEAP_REGNO (REGNO (N)))
475
476 #define COST(X) \
477 (GET_CODE (X) == REG \
478 ? (CHEAP_REG (X) ? 0 \
479 : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
480 : 2) \
481 : rtx_cost (X, SET) * 2)
482
483 /* Determine if the quantity number for register X represents a valid index
484 into the `qty_...' variables. */
485
486 #define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
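
/* For example, just after new_basic_block below has run, reg_qty[N] == N
   for every register, so no register has a valid quantity until
   make_new_qty is called for it.  */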
487
488 static struct table_elt *table[NBUCKETS];
489
490 /* Chain of `struct table_elt's made so far for this function
491 but currently removed from the table. */
492
493 static struct table_elt *free_element_chain;
494
495 /* Number of `struct table_elt' structures made so far for this function. */
496
497 static int n_elements_made;
498
499 /* Maximum value `n_elements_made' has had so far in this compilation
500 for functions previously processed. */
501
502 static int max_elements_made;
503
504 /* Surviving equivalence class when two equivalence classes are merged
505 by recording the effects of a jump in the last insn. Zero if the
506 last insn was not a conditional jump. */
507
508 static struct table_elt *last_jump_equiv_class;
509
510 /* Set to the cost of a constant pool reference if one was found for a
511 symbolic constant. If this was found, it means we should try to
512 convert constants into constant pool entries if they don't fit in
513 the insn. */
514
515 static int constant_pool_entries_cost;
516
517 /* Bits describing what kind of values in memory must be invalidated
518 for a particular instruction. If none of these bits is set,
519 no memory refs need to be invalidated. Each bit is more powerful
520 than the preceding ones, and if a bit is set then the preceding
521 bits are also set.
522
523 Here is how the bits are set:
524 Pushing onto the stack invalidates only the stack pointer,
525 writing at a fixed address invalidates only variable addresses,
526 writing in a structure element at variable address
527 invalidates all but scalar variables,
528 and writing in anything else at variable address invalidates everything. */
529
530 struct write_data
531 {
532 int sp : 1; /* Invalidate stack pointer. */
533 int var : 1; /* Invalidate variable addresses. */
534 int nonscalar : 1; /* Invalidate all but scalar variables. */
535 int all : 1; /* Invalidate all memory refs. */
536 };
537
538 /* Define maximum length of a branch path. */
539
540 #define PATHLENGTH 10
541
542 /* This data describes a block that will be processed by cse_basic_block. */
543
544 struct cse_basic_block_data {
545 /* Lowest CUID value of insns in block. */
546 int low_cuid;
547 /* Highest CUID value of insns in block. */
548 int high_cuid;
549 /* Total number of SETs in block. */
550 int nsets;
551 /* Last insn in the block. */
552 rtx last;
553 /* Size of current branch path, if any. */
554 int path_size;
555 /* Current branch path, indicating which branches will be taken. */
556 struct branch_path {
557 /* The branch insn. */
558 rtx branch;
559 /* Whether it should be taken or not. AROUND is the same as taken
560 except that it is used when the destination label is not preceded
561 by a BARRIER. */
562 enum taken {TAKEN, NOT_TAKEN, AROUND} status;
563 } path[PATHLENGTH];
564 };
565
566 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
567 virtual regs here because the simplify_*_operation routines are called
568 by integrate.c, which is called before virtual register instantiation. */
569
570 #define FIXED_BASE_PLUS_P(X) \
571 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
572 || (X) == arg_pointer_rtx \
573 || (X) == virtual_stack_vars_rtx \
574 || (X) == virtual_incoming_args_rtx \
575 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
576 && (XEXP (X, 0) == frame_pointer_rtx \
577 || XEXP (X, 0) == hard_frame_pointer_rtx \
578 || XEXP (X, 0) == arg_pointer_rtx \
579 || XEXP (X, 0) == virtual_stack_vars_rtx \
580 || XEXP (X, 0) == virtual_incoming_args_rtx)))
581
582 /* Similar, but also allows reference to the stack pointer.
583
584 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
585 arg_pointer_rtx by itself is nonzero, because on at least one machine,
586 the i960, the arg pointer is zero when it is unused. */
587
588 #define NONZERO_BASE_PLUS_P(X) \
589 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
590 || (X) == virtual_stack_vars_rtx \
591 || (X) == virtual_incoming_args_rtx \
592 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
593 && (XEXP (X, 0) == frame_pointer_rtx \
594 || XEXP (X, 0) == hard_frame_pointer_rtx \
595 || XEXP (X, 0) == arg_pointer_rtx \
596 || XEXP (X, 0) == virtual_stack_vars_rtx \
597 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
598 || (X) == stack_pointer_rtx \
599 || (X) == virtual_stack_dynamic_rtx \
600 || (X) == virtual_outgoing_args_rtx \
601 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
602 && (XEXP (X, 0) == stack_pointer_rtx \
603 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
604 || XEXP (X, 0) == virtual_outgoing_args_rtx)))
605
606 static void new_basic_block PROTO((void));
607 static void make_new_qty PROTO((int));
608 static void make_regs_eqv PROTO((int, int));
609 static void delete_reg_equiv PROTO((int));
610 static int mention_regs PROTO((rtx));
611 static int insert_regs PROTO((rtx, struct table_elt *, int));
612 static void free_element PROTO((struct table_elt *));
613 static void remove_from_table PROTO((struct table_elt *, unsigned));
614 static struct table_elt *get_element PROTO((void));
615 static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
616 *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
617 static rtx lookup_as_function PROTO((rtx, enum rtx_code));
618 static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
619 enum machine_mode));
620 static void merge_equiv_classes PROTO((struct table_elt *,
621 struct table_elt *));
622 static void invalidate PROTO((rtx, enum machine_mode));
623 static void remove_invalid_refs PROTO((int));
624 static void rehash_using_reg PROTO((rtx));
625 static void invalidate_memory PROTO((struct write_data *));
626 static void invalidate_for_call PROTO((void));
627 static rtx use_related_value PROTO((rtx, struct table_elt *));
628 static unsigned canon_hash PROTO((rtx, enum machine_mode));
629 static unsigned safe_hash PROTO((rtx, enum machine_mode));
630 static int exp_equiv_p PROTO((rtx, rtx, int, int));
631 static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
632 HOST_WIDE_INT *,
633 HOST_WIDE_INT *));
634 static int refers_to_p PROTO((rtx, rtx));
635 static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
636 HOST_WIDE_INT));
637 static int cse_rtx_addr_varies_p PROTO((rtx));
638 static rtx canon_reg PROTO((rtx, rtx));
639 static void find_best_addr PROTO((rtx, rtx *));
640 static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
641 enum machine_mode *,
642 enum machine_mode *));
643 static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
644 rtx, rtx));
645 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
646 rtx, rtx));
647 static rtx fold_rtx PROTO((rtx, rtx));
648 static rtx equiv_constant PROTO((rtx));
649 static void record_jump_equiv PROTO((rtx, int));
650 static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
651 rtx, rtx, int));
652 static void cse_insn PROTO((rtx, int));
653 static void note_mem_written PROTO((rtx, struct write_data *));
654 static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
655 static rtx cse_process_notes PROTO((rtx, rtx));
656 static void cse_around_loop PROTO((rtx));
657 static void invalidate_skipped_set PROTO((rtx, rtx));
658 static void invalidate_skipped_block PROTO((rtx));
659 static void cse_check_loop_start PROTO((rtx, rtx));
660 static void cse_set_around_loop PROTO((rtx, rtx, rtx));
661 static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
662 static void count_reg_usage PROTO((rtx, int *, rtx, int));
663
664 extern int rtx_equal_function_value_matters;
665 \f
666 /* Return an estimate of the cost of computing rtx X.
667 One use is in cse, to decide which expression to keep in the hash table.
668 Another is in rtl generation, to pick the cheapest way to multiply.
669 Other uses like the latter are expected in the future. */
670
671 /* Return the right cost to give to an operation
672 to make the cost of the corresponding register-to-register instruction
673 N times that of a fast register-to-register instruction. */
674
675 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
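
/* Thus COSTS_N_INSNS (1) == 2, matching the default cost of a simple
   operation in rtx_cost below, and the COSTS_N_INSNS (5) used there for
   a general MULT works out to 18.  */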
676
677 int
678 rtx_cost (x, outer_code)
679 rtx x;
680 enum rtx_code outer_code;
681 {
682 register int i, j;
683 register enum rtx_code code;
684 register char *fmt;
685 register int total;
686
687 if (x == 0)
688 return 0;
689
690 /* Compute the default costs of certain things.
691 Note that RTX_COSTS can override the defaults. */
692
693 code = GET_CODE (x);
694 switch (code)
695 {
696 case MULT:
697 /* Count multiplication by 2**n as a shift,
698 because if we are considering it, we would output it as a shift. */
699 if (GET_CODE (XEXP (x, 1)) == CONST_INT
700 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
701 total = 2;
702 else
703 total = COSTS_N_INSNS (5);
704 break;
705 case DIV:
706 case UDIV:
707 case MOD:
708 case UMOD:
709 total = COSTS_N_INSNS (7);
710 break;
711 case USE:
712 /* Used in loop.c and combine.c as a marker. */
713 total = 0;
714 break;
715 case ASM_OPERANDS:
716 /* We don't want these to be used in substitutions because
717 we have no way of validating the resulting insn. So assign
718 anything containing an ASM_OPERANDS a very high cost. */
719 total = 1000;
720 break;
721 default:
722 total = 2;
723 }
724
725 switch (code)
726 {
727 case REG:
728 return ! CHEAP_REG (x);
729
730 case SUBREG:
731 /* If we can't tie these modes, make this expensive. The larger
732 the mode, the more expensive it is. */
733 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
734 return COSTS_N_INSNS (2
735 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
736 return 2;
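      /* RTX_COSTS and CONST_COSTS expand into `case' labels (with
	 `return' statements) for this switch, supplying machine-specific
	 costs from the target description.  */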
737 #ifdef RTX_COSTS
738 RTX_COSTS (x, code, outer_code);
739 #endif
740 CONST_COSTS (x, code, outer_code);
741 }
742
743 /* Sum the costs of the sub-rtx's, plus cost of this operation,
744 which is already in total. */
745
746 fmt = GET_RTX_FORMAT (code);
747 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
748 if (fmt[i] == 'e')
749 total += rtx_cost (XEXP (x, i), code);
750 else if (fmt[i] == 'E')
751 for (j = 0; j < XVECLEN (x, i); j++)
752 total += rtx_cost (XVECEXP (x, i, j), code);
753
754 return total;
755 }
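
/* A worked example (a sketch only; it assumes a machine whose RTX_COSTS
   and CONST_COSTS macros do not override the relevant defaults): for
   (mult (reg 65) (const_int 4)), multiplication by 2**2 is counted as a
   shift, so the operation itself contributes 2; a pseudo register that
   is not one of the cheap registers adds 1, and the CONST_INT adds the
   default 2, for a total of 5.  */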
756 \f
757 /* Clear the hash table and initialize each register with its own quantity,
758 for a new basic block. */
759
760 static void
761 new_basic_block ()
762 {
763 register int i;
764
765 next_qty = max_reg;
766
767 bzero ((char *) reg_tick, max_reg * sizeof (int));
768
769 bcopy ((char *) all_minus_one, (char *) reg_in_table,
770 max_reg * sizeof (int));
771 bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
772 CLEAR_HARD_REG_SET (hard_regs_in_table);
773
774 /* The per-quantity values used to be initialized here, but it is
775 much faster to initialize each as it is made in `make_new_qty'. */
776
777 for (i = 0; i < NBUCKETS; i++)
778 {
779 register struct table_elt *this, *next;
780 for (this = table[i]; this; this = next)
781 {
782 next = this->next_same_hash;
783 free_element (this);
784 }
785 }
786
787 bzero ((char *) table, sizeof table);
788
789 prev_insn = 0;
790
791 #ifdef HAVE_cc0
792 prev_insn_cc0 = 0;
793 #endif
794 }
795
796 /* Say that register REG contains a new quantity not previously held
797 in any register, and initialize that quantity. */
798
799 static void
800 make_new_qty (reg)
801 register int reg;
802 {
803 register int q;
804
805 if (next_qty >= max_qty)
806 abort ();
807
808 q = reg_qty[reg] = next_qty++;
809 qty_first_reg[q] = reg;
810 qty_last_reg[q] = reg;
811 qty_const[q] = qty_const_insn[q] = 0;
812 qty_comparison_code[q] = UNKNOWN;
813
814 reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
815 }
816
817 /* Make reg NEW equivalent to reg OLD.
818 OLD is not changing; NEW is. */
819
820 static void
821 make_regs_eqv (new, old)
822 register int new, old;
823 {
824 register int lastr, firstr;
825 register int q = reg_qty[old];
826
827 /* Nothing should become eqv until it has a "non-invalid" qty number. */
828 if (! REGNO_QTY_VALID_P (old))
829 abort ();
830
831 reg_qty[new] = q;
832 firstr = qty_first_reg[q];
833 lastr = qty_last_reg[q];
834
835 /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
836 hard regs. Among pseudos, if NEW will live longer than any other reg
837 of the same qty, and that is beyond the current basic block,
838 make it the new canonical replacement for this qty. */
839 if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
840 /* Certain fixed registers might be of the class NO_REGS. This means
841 that not only can they not be allocated by the compiler, but
842 they cannot be used in substitutions or canonicalizations
843 either. */
844 && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
845 && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
846 || (new >= FIRST_PSEUDO_REGISTER
847 && (firstr < FIRST_PSEUDO_REGISTER
848 || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
849 || (uid_cuid[regno_first_uid[new]]
850 < cse_basic_block_start))
851 && (uid_cuid[regno_last_uid[new]]
852 > uid_cuid[regno_last_uid[firstr]]))))))
853 {
854 reg_prev_eqv[firstr] = new;
855 reg_next_eqv[new] = firstr;
856 reg_prev_eqv[new] = -1;
857 qty_first_reg[q] = new;
858 }
859 else
860 {
861 /* If NEW is a hard reg (known to be non-fixed), insert at end.
862 Otherwise, insert before any non-fixed hard regs that are at the
863 end. Registers of class NO_REGS cannot be used as an
864 equivalent for anything. */
865 while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
866 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
867 && new >= FIRST_PSEUDO_REGISTER)
868 lastr = reg_prev_eqv[lastr];
869 reg_next_eqv[new] = reg_next_eqv[lastr];
870 if (reg_next_eqv[lastr] >= 0)
871 reg_prev_eqv[reg_next_eqv[lastr]] = new;
872 else
873 qty_last_reg[q] = new;
874 reg_next_eqv[lastr] = new;
875 reg_prev_eqv[new] = lastr;
876 }
877 }
878
879 /* Remove REG from its equivalence class. */
880
881 static void
882 delete_reg_equiv (reg)
883 register int reg;
884 {
885 register int q = reg_qty[reg];
886 register int p, n;
887
888 /* If invalid, do nothing. */
889 if (q == reg)
890 return;
891
892 p = reg_prev_eqv[reg];
893 n = reg_next_eqv[reg];
894
895 if (n != -1)
896 reg_prev_eqv[n] = p;
897 else
898 qty_last_reg[q] = p;
899 if (p != -1)
900 reg_next_eqv[p] = n;
901 else
902 qty_first_reg[q] = n;
903
904 reg_qty[reg] = reg;
905 }
906
907 /* Remove any invalid expressions from the hash table
908 that refer to any of the registers contained in expression X.
909
910 Make sure that newly inserted references to those registers
911 as subexpressions will be considered valid.
912
913 mention_regs is not called when a register itself
914 is being stored in the table.
915
916 Return 1 if we have done something that may have changed the hash code
917 of X. */
918
919 static int
920 mention_regs (x)
921 rtx x;
922 {
923 register enum rtx_code code;
924 register int i, j;
925 register char *fmt;
926 register int changed = 0;
927
928 if (x == 0)
929 return 0;
930
931 code = GET_CODE (x);
932 if (code == REG)
933 {
934 register int regno = REGNO (x);
935 register int endregno
936 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
937 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
938 int i;
939
940 for (i = regno; i < endregno; i++)
941 {
942 if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
943 remove_invalid_refs (i);
944
945 reg_in_table[i] = reg_tick[i];
946 }
947
948 return 0;
949 }
950
951 /* If X is a comparison or a COMPARE and either operand is a register
952 that does not have a quantity, give it one. This is so that a later
953 call to record_jump_equiv won't cause X to be assigned a different
954 hash code and not found in the table after that call.
955
956 It is not necessary to do this here, since rehash_using_reg can
957 fix up the table later, but doing this here eliminates the need to
958 call that expensive function in the most common case where the only
959 use of the register is in the comparison. */
960
961 if (code == COMPARE || GET_RTX_CLASS (code) == '<')
962 {
963 if (GET_CODE (XEXP (x, 0)) == REG
964 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
965 if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
966 {
967 rehash_using_reg (XEXP (x, 0));
968 changed = 1;
969 }
970
971 if (GET_CODE (XEXP (x, 1)) == REG
972 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
973 if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
974 {
975 rehash_using_reg (XEXP (x, 1));
976 changed = 1;
977 }
978 }
979
980 fmt = GET_RTX_FORMAT (code);
981 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
982 if (fmt[i] == 'e')
983 changed |= mention_regs (XEXP (x, i));
984 else if (fmt[i] == 'E')
985 for (j = 0; j < XVECLEN (x, i); j++)
986 changed |= mention_regs (XVECEXP (x, i, j));
987
988 return changed;
989 }
990
991 /* Update the register quantities for inserting X into the hash table
992 with a value equivalent to CLASSP.
993 (If the class does not contain a REG, it is irrelevant.)
994 If MODIFIED is nonzero, X is a destination; it is being modified.
995 Note that delete_reg_equiv should be called on a register
996 before insert_regs is done on that register with MODIFIED != 0.
997
998 Nonzero value means that elements of reg_qty have changed
999 so X's hash code may be different. */
1000
1001 static int
1002 insert_regs (x, classp, modified)
1003 rtx x;
1004 struct table_elt *classp;
1005 int modified;
1006 {
1007 if (GET_CODE (x) == REG)
1008 {
1009 register int regno = REGNO (x);
1010
1011 /* If REGNO is in the equivalence table already but is of the
1012 wrong mode for that equivalence, don't do anything here. */
1013
1014 if (REGNO_QTY_VALID_P (regno)
1015 && qty_mode[reg_qty[regno]] != GET_MODE (x))
1016 return 0;
1017
1018 if (modified || ! REGNO_QTY_VALID_P (regno))
1019 {
1020 if (classp)
1021 for (classp = classp->first_same_value;
1022 classp != 0;
1023 classp = classp->next_same_value)
1024 if (GET_CODE (classp->exp) == REG
1025 && GET_MODE (classp->exp) == GET_MODE (x))
1026 {
1027 make_regs_eqv (regno, REGNO (classp->exp));
1028 return 1;
1029 }
1030
1031 make_new_qty (regno);
1032 qty_mode[reg_qty[regno]] = GET_MODE (x);
1033 return 1;
1034 }
1035
1036 return 0;
1037 }
1038
1039 /* If X is a SUBREG, we will likely be inserting the inner register in the
1040 table. If that register doesn't have an assigned quantity number at
1041 this point but does later, the insertion that we will be doing now will
1042 not be accessible because its hash code will have changed. So assign
1043 a quantity number now. */
1044
1045 else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1046 && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
1047 {
1048 insert_regs (SUBREG_REG (x), NULL_PTR, 0);
1049 mention_regs (SUBREG_REG (x));
1050 return 1;
1051 }
1052 else
1053 return mention_regs (x);
1054 }
1055 \f
1056 /* Look in or update the hash table. */
1057
1058 /* Put the element ELT on the list of free elements. */
1059
1060 static void
1061 free_element (elt)
1062 struct table_elt *elt;
1063 {
1064 elt->next_same_hash = free_element_chain;
1065 free_element_chain = elt;
1066 }
1067
1068 /* Return an element that is free for use. */
1069
1070 static struct table_elt *
1071 get_element ()
1072 {
1073 struct table_elt *elt = free_element_chain;
1074 if (elt)
1075 {
1076 free_element_chain = elt->next_same_hash;
1077 return elt;
1078 }
1079 n_elements_made++;
1080 return (struct table_elt *) oballoc (sizeof (struct table_elt));
1081 }
1082
1083 /* Remove table element ELT from use in the table.
1084 HASH is its hash code, made using the HASH macro.
1085 It's an argument because often that is known in advance
1086 and we save much time not recomputing it. */
1087
1088 static void
1089 remove_from_table (elt, hash)
1090 register struct table_elt *elt;
1091 unsigned hash;
1092 {
1093 if (elt == 0)
1094 return;
1095
1096 /* Mark this element as removed. See cse_insn. */
1097 elt->first_same_value = 0;
1098
1099 /* Remove the table element from its equivalence class. */
1100
1101 {
1102 register struct table_elt *prev = elt->prev_same_value;
1103 register struct table_elt *next = elt->next_same_value;
1104
1105 if (next) next->prev_same_value = prev;
1106
1107 if (prev)
1108 prev->next_same_value = next;
1109 else
1110 {
1111 register struct table_elt *newfirst = next;
1112 while (next)
1113 {
1114 next->first_same_value = newfirst;
1115 next = next->next_same_value;
1116 }
1117 }
1118 }
1119
1120 /* Remove the table element from its hash bucket. */
1121
1122 {
1123 register struct table_elt *prev = elt->prev_same_hash;
1124 register struct table_elt *next = elt->next_same_hash;
1125
1126 if (next) next->prev_same_hash = prev;
1127
1128 if (prev)
1129 prev->next_same_hash = next;
1130 else if (table[hash] == elt)
1131 table[hash] = next;
1132 else
1133 {
1134 /* This entry is not in the proper hash bucket. This can happen
1135 when two classes were merged by `merge_equiv_classes'. Search
1136 for the hash bucket that it heads. This happens only very
1137 rarely, so the cost is acceptable. */
1138 for (hash = 0; hash < NBUCKETS; hash++)
1139 if (table[hash] == elt)
1140 table[hash] = next;
1141 }
1142 }
1143
1144 /* Remove the table element from its related-value circular chain. */
1145
1146 if (elt->related_value != 0 && elt->related_value != elt)
1147 {
1148 register struct table_elt *p = elt->related_value;
1149 while (p->related_value != elt)
1150 p = p->related_value;
1151 p->related_value = elt->related_value;
1152 if (p->related_value == p)
1153 p->related_value = 0;
1154 }
1155
1156 free_element (elt);
1157 }
1158
1159 /* Look up X in the hash table and return its table element,
1160 or 0 if X is not in the table.
1161
1162 MODE is the machine-mode of X, or if X is an integer constant
1163 with VOIDmode then MODE is the mode with which X will be used.
1164
1165 Here we are satisfied to find an expression whose tree structure
1166 looks like X. */
1167
1168 static struct table_elt *
1169 lookup (x, hash, mode)
1170 rtx x;
1171 unsigned hash;
1172 enum machine_mode mode;
1173 {
1174 register struct table_elt *p;
1175
1176 for (p = table[hash]; p; p = p->next_same_hash)
1177 if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
1178 || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
1179 return p;
1180
1181 return 0;
1182 }
1183
1184 /* Like `lookup' but don't care whether the table element uses invalid regs.
1185 Also ignore discrepancies in the machine mode of a register. */
1186
1187 static struct table_elt *
1188 lookup_for_remove (x, hash, mode)
1189 rtx x;
1190 unsigned hash;
1191 enum machine_mode mode;
1192 {
1193 register struct table_elt *p;
1194
1195 if (GET_CODE (x) == REG)
1196 {
1197 int regno = REGNO (x);
1198 /* Don't check the machine mode when comparing registers;
1199 invalidating (REG:SI 0) also invalidates (REG:DF 0). */
1200 for (p = table[hash]; p; p = p->next_same_hash)
1201 if (GET_CODE (p->exp) == REG
1202 && REGNO (p->exp) == regno)
1203 return p;
1204 }
1205 else
1206 {
1207 for (p = table[hash]; p; p = p->next_same_hash)
1208 if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
1209 return p;
1210 }
1211
1212 return 0;
1213 }
1214
1215 /* Look for an expression equivalent to X and with code CODE.
1216 If one is found, return that expression. */
1217
1218 static rtx
1219 lookup_as_function (x, code)
1220 rtx x;
1221 enum rtx_code code;
1222 {
1223 register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
1224 GET_MODE (x));
1225 if (p == 0)
1226 return 0;
1227
1228 for (p = p->first_same_value; p; p = p->next_same_value)
1229 {
1230 if (GET_CODE (p->exp) == code
1231 /* Make sure this is a valid entry in the table. */
1232 && exp_equiv_p (p->exp, p->exp, 1, 0))
1233 return p->exp;
1234 }
1235
1236 return 0;
1237 }
1238
1239 /* Insert X in the hash table, assuming HASH is its hash code
1240 and CLASSP is an element of the class it should go in
1241 (or 0 if a new class should be made).
1242 It is inserted at the proper position to keep the class in
1243 the order cheapest first.
1244
1245 MODE is the machine-mode of X, or if X is an integer constant
1246 with VOIDmode then MODE is the mode with which X will be used.
1247
1248 For elements of equal cheapness, the most recent one
1249 goes in front, except that the first element in the list
1250 remains first unless a cheaper element is added. The order of
1251 pseudo-registers does not matter, as canon_reg will be called to
1252 find the cheapest when a register is retrieved from the table.
1253
1254 The in_memory field in the hash table element is set to 0.
1255 The caller must set it nonzero if appropriate.
1256
1257 You should call insert_regs (X, CLASSP, MODIFY) before calling here,
1258 and if insert_regs returns a nonzero value
1259 you must then recompute its hash code before calling here.
1260
1261 If necessary, update table showing constant values of quantities. */
1262
1263 #define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
1264
1265 static struct table_elt *
1266 insert (x, classp, hash, mode)
1267 register rtx x;
1268 register struct table_elt *classp;
1269 unsigned hash;
1270 enum machine_mode mode;
1271 {
1272 register struct table_elt *elt;
1273
1274 /* If X is a register and we haven't made a quantity for it,
1275 something is wrong. */
1276 if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
1277 abort ();
1278
1279 /* If X is a hard register, show it is being put in the table. */
1280 if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
1281 {
1282 int regno = REGNO (x);
1283 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1284 int i;
1285
1286 for (i = regno; i < endregno; i++)
1287 SET_HARD_REG_BIT (hard_regs_in_table, i);
1288 }
1289
1290 /* If X is a label, show we are altering jumps. We don't KNOW
1291 we are, but we might be putting it into an insn which would
1292 then need a new REG_LABEL note. Be conservative and say
1293 we alter jumps here; we usually will in this case anyway. */
1294 if (GET_CODE (x) == LABEL_REF
1295 || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
1296 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
1297 cse_jumps_altered = 1;
1298
1299 /* Put an element for X into the right hash bucket. */
1300
1301 elt = get_element ();
1302 elt->exp = x;
1303 elt->cost = COST (x);
1304 elt->next_same_value = 0;
1305 elt->prev_same_value = 0;
1306 elt->next_same_hash = table[hash];
1307 elt->prev_same_hash = 0;
1308 elt->related_value = 0;
1309 elt->in_memory = 0;
1310 elt->mode = mode;
1311 elt->is_const = (CONSTANT_P (x)
1312 /* GNU C++ takes advantage of this for `this'
1313 (and other const values). */
1314 || (RTX_UNCHANGING_P (x)
1315 && GET_CODE (x) == REG
1316 && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1317 || FIXED_BASE_PLUS_P (x));
1318
1319 if (table[hash])
1320 table[hash]->prev_same_hash = elt;
1321 table[hash] = elt;
1322
1323 /* Put it into the proper value-class. */
1324 if (classp)
1325 {
1326 classp = classp->first_same_value;
1327 if (CHEAPER (elt, classp))
1328 /* Insert at the head of the class */
1329 {
1330 register struct table_elt *p;
1331 elt->next_same_value = classp;
1332 classp->prev_same_value = elt;
1333 elt->first_same_value = elt;
1334
1335 for (p = classp; p; p = p->next_same_value)
1336 p->first_same_value = elt;
1337 }
1338 else
1339 {
1340 /* Insert not at head of the class. */
1341 /* Put it after the last element cheaper than X. */
1342 register struct table_elt *p, *next;
1343 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
1344 p = next);
1345 /* Put it after P and before NEXT. */
1346 elt->next_same_value = next;
1347 if (next)
1348 next->prev_same_value = elt;
1349 elt->prev_same_value = p;
1350 p->next_same_value = elt;
1351 elt->first_same_value = classp;
1352 }
1353 }
1354 else
1355 elt->first_same_value = elt;
1356
1357 /* If this is a constant being set equivalent to a register or a register
1358 being set equivalent to a constant, note the constant equivalence.
1359
1360 If this is a constant, it cannot be equivalent to a different constant,
1361 and a constant is the only thing that can be cheaper than a register. So
1362 we know the register is the head of the class (before the constant was
1363 inserted).
1364
1365 If this is a register that is not already known equivalent to a
1366 constant, we must check the entire class.
1367
1368 If this is a register that is already known equivalent to an insn,
1369 update `qty_const_insn' to show that `this_insn' is the latest
1370 insn making that quantity equivalent to the constant. */
1371
1372 if (elt->is_const && classp && GET_CODE (classp->exp) == REG)
1373 {
1374 qty_const[reg_qty[REGNO (classp->exp)]]
1375 = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
1376 qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
1377 }
1378
1379 else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]])
1380 {
1381 register struct table_elt *p;
1382
1383 for (p = classp; p != 0; p = p->next_same_value)
1384 {
1385 if (p->is_const)
1386 {
1387 qty_const[reg_qty[REGNO (x)]]
1388 = gen_lowpart_if_possible (GET_MODE (x), p->exp);
1389 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1390 break;
1391 }
1392 }
1393 }
1394
1395 else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
1396 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
1397 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1398
1399 /* If this is a constant with symbolic value,
1400 and it has a term with an explicit integer value,
1401 link it up with related expressions. */
1402 if (GET_CODE (x) == CONST)
1403 {
1404 rtx subexp = get_related_value (x);
1405 unsigned subhash;
1406 struct table_elt *subelt, *subelt_prev;
1407
1408 if (subexp != 0)
1409 {
1410 /* Get the integer-free subexpression in the hash table. */
1411 subhash = safe_hash (subexp, mode) % NBUCKETS;
1412 subelt = lookup (subexp, subhash, mode);
1413 if (subelt == 0)
1414 subelt = insert (subexp, NULL_PTR, subhash, mode);
1415 /* Initialize SUBELT's circular chain if it has none. */
1416 if (subelt->related_value == 0)
1417 subelt->related_value = subelt;
1418 /* Find the element in the circular chain that precedes SUBELT. */
1419 subelt_prev = subelt;
1420 while (subelt_prev->related_value != subelt)
1421 subelt_prev = subelt_prev->related_value;
1422 /* Put new ELT into SUBELT's circular chain just before SUBELT.
1423 This way the element that follows SUBELT is the oldest one. */
1424 elt->related_value = subelt_prev->related_value;
1425 subelt_prev->related_value = elt;
1426 }
1427 }
1428
1429 return elt;
1430 }
1431 \f
1432 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
1433 CLASS2 into CLASS1. This is done when we have reached an insn which makes
1434 the two classes equivalent.
1435
1436 CLASS1 will be the surviving class; CLASS2 should not be used after this
1437 call.
1438
1439 Any invalid entries in CLASS2 will not be copied. */
1440
1441 static void
1442 merge_equiv_classes (class1, class2)
1443 struct table_elt *class1, *class2;
1444 {
1445 struct table_elt *elt, *next, *new;
1446
1447 /* Ensure we start with the head of the classes. */
1448 class1 = class1->first_same_value;
1449 class2 = class2->first_same_value;
1450
1451 /* If they were already equal, forget it. */
1452 if (class1 == class2)
1453 return;
1454
1455 for (elt = class2; elt; elt = next)
1456 {
1457 unsigned hash;
1458 rtx exp = elt->exp;
1459 enum machine_mode mode = elt->mode;
1460
1461 next = elt->next_same_value;
1462
1463 /* Remove old entry, make a new one in CLASS1's class.
1464 Don't do this for invalid entries as we cannot find their
1465 hash code (it also isn't necessary). */
1466 if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
1467 {
1468 hash_arg_in_memory = 0;
1469 hash_arg_in_struct = 0;
1470 hash = HASH (exp, mode);
1471
1472 if (GET_CODE (exp) == REG)
1473 delete_reg_equiv (REGNO (exp));
1474
1475 remove_from_table (elt, hash);
1476
1477 if (insert_regs (exp, class1, 0))
1478 {
1479 rehash_using_reg (exp);
1480 hash = HASH (exp, mode);
1481 }
1482 new = insert (exp, class1, hash, mode);
1483 new->in_memory = hash_arg_in_memory;
1484 new->in_struct = hash_arg_in_struct;
1485 }
1486 }
1487 }
1488 \f
1489 /* Remove from the hash table, or mark as invalid,
1490 all expressions whose values could be altered by storing in X.
1491 X is a register, a subreg, or a memory reference with nonvarying address
1492 (because, when a memory reference with a varying address is stored in,
1493 all memory references are removed by invalidate_memory
1494 so specific invalidation is superfluous).
1495 FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
1496 instead of just the amount indicated by the mode of X. This is only used
1497 for bitfield stores into memory.
1498
1499 A nonvarying address may be just a register or just
1500 a symbol reference, or it may be either of those plus
1501 a numeric offset. */
1502
1503 static void
1504 invalidate (x, full_mode)
1505 rtx x;
1506 enum machine_mode full_mode;
1507 {
1508 register int i;
1509 register struct table_elt *p;
1510 rtx base;
1511 HOST_WIDE_INT start, end;
1512
1513 /* If X is a register, dependencies on its contents
1514 are recorded through the qty number mechanism.
1515 Just change the qty number of the register,
1516 mark it as invalid for expressions that refer to it,
1517 and remove it itself. */
1518
1519 if (GET_CODE (x) == REG)
1520 {
1521 register int regno = REGNO (x);
1522 register unsigned hash = HASH (x, GET_MODE (x));
1523
1524 /* Remove REGNO from any quantity list it might be on and indicate
1525 that its value might have changed. If it is a pseudo, remove its
1526 entry from the hash table.
1527
1528 For a hard register, we do the first two actions above for any
1529 additional hard registers corresponding to X. Then, if any of these
1530 registers are in the table, we must remove any REG entries that
1531 overlap these registers. */
1532
1533 delete_reg_equiv (regno);
1534 reg_tick[regno]++;
1535
1536 if (regno >= FIRST_PSEUDO_REGISTER)
1537 remove_from_table (lookup_for_remove (x, hash, GET_MODE (x)), hash);
1538 else
1539 {
1540 HOST_WIDE_INT in_table
1541 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1542 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1543 int tregno, tendregno;
1544 register struct table_elt *p, *next;
1545
1546 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1547
1548 for (i = regno + 1; i < endregno; i++)
1549 {
1550 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1551 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1552 delete_reg_equiv (i);
1553 reg_tick[i]++;
1554 }
1555
1556 if (in_table)
1557 for (hash = 0; hash < NBUCKETS; hash++)
1558 for (p = table[hash]; p; p = next)
1559 {
1560 next = p->next_same_hash;
1561
1562 if (GET_CODE (p->exp) != REG
1563 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1564 continue;
1565
1566 tregno = REGNO (p->exp);
1567 tendregno
1568 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
1569 if (tendregno > regno && tregno < endregno)
1570 remove_from_table (p, hash);
1571 }
1572 }
1573
1574 return;
1575 }
1576
1577 if (GET_CODE (x) == SUBREG)
1578 {
1579 if (GET_CODE (SUBREG_REG (x)) != REG)
1580 abort ();
1581 invalidate (SUBREG_REG (x), VOIDmode);
1582 return;
1583 }
1584
1585 /* X is not a register; it must be a memory reference with
1586 a nonvarying address. Remove all hash table elements
1587 that refer to overlapping pieces of memory. */
1588
1589 if (GET_CODE (x) != MEM)
1590 abort ();
1591
1592 if (full_mode == VOIDmode)
1593 full_mode = GET_MODE (x);
1594
1595 set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
1596 &base, &start, &end);
1597
1598 for (i = 0; i < NBUCKETS; i++)
1599 {
1600 register struct table_elt *next;
1601 for (p = table[i]; p; p = next)
1602 {
1603 next = p->next_same_hash;
1604 if (refers_to_mem_p (p->exp, base, start, end))
1605 remove_from_table (p, i);
1606 }
1607 }
1608 }
1609
1610 /* Remove all expressions that refer to register REGNO,
1611 since they are already invalid, and we are about to
1612 mark that register valid again and don't want the old
1613 expressions to reappear as valid. */
1614
1615 static void
1616 remove_invalid_refs (regno)
1617 int regno;
1618 {
1619 register int i;
1620 register struct table_elt *p, *next;
1621
1622 for (i = 0; i < NBUCKETS; i++)
1623 for (p = table[i]; p; p = next)
1624 {
1625 next = p->next_same_hash;
1626 if (GET_CODE (p->exp) != REG
1627 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1628 remove_from_table (p, i);
1629 }
1630 }
1631 \f
1632 /* Recompute the hash codes of any valid entries in the hash table that
1633 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1634
1635 This is called when we make a jump equivalence. */
1636
1637 static void
1638 rehash_using_reg (x)
1639 rtx x;
1640 {
1641 int i;
1642 struct table_elt *p, *next;
1643 unsigned hash;
1644
1645 if (GET_CODE (x) == SUBREG)
1646 x = SUBREG_REG (x);
1647
1648 /* If X is not a register or if the register is known not to be in any
1649 valid entries in the table, we have no work to do. */
1650
1651 if (GET_CODE (x) != REG
1652 || reg_in_table[REGNO (x)] < 0
1653 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1654 return;
1655
1656 /* Scan all hash chains looking for valid entries that mention X.
1657 If we find one and it is in the wrong hash chain, move it. We can skip
1658 objects that are registers, since they are handled specially. */
1659
1660 for (i = 0; i < NBUCKETS; i++)
1661 for (p = table[i]; p; p = next)
1662 {
1663 next = p->next_same_hash;
1664 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1665 && exp_equiv_p (p->exp, p->exp, 1, 0)
1666 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1667 {
1668 if (p->next_same_hash)
1669 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1670
1671 if (p->prev_same_hash)
1672 p->prev_same_hash->next_same_hash = p->next_same_hash;
1673 else
1674 table[i] = p->next_same_hash;
1675
1676 p->next_same_hash = table[hash];
1677 p->prev_same_hash = 0;
1678 if (table[hash])
1679 table[hash]->prev_same_hash = p;
1680 table[hash] = p;
1681 }
1682 }
1683 }
1684 \f
1685 /* Remove from the hash table all expressions that reference memory,
1686 or some of them as specified by *WRITES. */
1687
1688 static void
1689 invalidate_memory (writes)
1690 struct write_data *writes;
1691 {
1692 register int i;
1693 register struct table_elt *p, *next;
1694 int all = writes->all;
1695 int nonscalar = writes->nonscalar;
1696
1697 for (i = 0; i < NBUCKETS; i++)
1698 for (p = table[i]; p; p = next)
1699 {
1700 next = p->next_same_hash;
1701 if (p->in_memory
1702 && (all
1703 || (nonscalar && p->in_struct)
1704 || cse_rtx_addr_varies_p (p->exp)))
1705 remove_from_table (p, i);
1706 }
1707 }
1708 \f
1709 /* Remove from the hash table any expression that is a call-clobbered
1710 register. Also update the TICK values of those registers. */
1711
1712 static void
1713 invalidate_for_call ()
1714 {
1715 int regno, endregno;
1716 int i;
1717 unsigned hash;
1718 struct table_elt *p, *next;
1719 int in_table = 0;
1720
1721 /* Go through all the hard registers. For each that is clobbered in
1722 a CALL_INSN, remove the register from quantity chains and update
1723 reg_tick if defined. Also see if any of these registers is currently
1724 in the table. */
1725
1726 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1727 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1728 {
1729 delete_reg_equiv (regno);
1730 if (reg_tick[regno] >= 0)
1731 reg_tick[regno]++;
1732
1733 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1734 }
1735
1736 /* In the case where we have no call-clobbered hard registers in the
1737 table, we are done. Otherwise, scan the table and remove any
1738 entry that overlaps a call-clobbered register. */
1739
1740 if (in_table)
1741 for (hash = 0; hash < NBUCKETS; hash++)
1742 for (p = table[hash]; p; p = next)
1743 {
1744 next = p->next_same_hash;
1745
1746 if (GET_CODE (p->exp) != REG
1747 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1748 continue;
1749
1750 regno = REGNO (p->exp);
1751 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1752
1753 for (i = regno; i < endregno; i++)
1754 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1755 {
1756 remove_from_table (p, hash);
1757 break;
1758 }
1759 }
1760 }
1761 \f
1762 /* Given an expression X of type CONST,
1763 and ELT which is its table entry (or 0 if it
1764 is not in the hash table),
1765 return an alternate expression for X as a register plus integer.
1766 If none can be found, return 0. */
1767
1768 static rtx
1769 use_related_value (x, elt)
1770 rtx x;
1771 struct table_elt *elt;
1772 {
1773 register struct table_elt *relt = 0;
1774 register struct table_elt *p, *q;
1775 HOST_WIDE_INT offset;
1776
1777 /* First, is there anything related known?
1778 If we have a table element, we can tell from that.
1779 Otherwise, must look it up. */
1780
1781 if (elt != 0 && elt->related_value != 0)
1782 relt = elt;
1783 else if (elt == 0 && GET_CODE (x) == CONST)
1784 {
1785 rtx subexp = get_related_value (x);
1786 if (subexp != 0)
1787 relt = lookup (subexp,
1788 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1789 GET_MODE (subexp));
1790 }
1791
1792 if (relt == 0)
1793 return 0;
1794
1795 /* Search all related table entries for one that has an
1796 equivalent register. */
1797
1798 p = relt;
1799 while (1)
1800 {
1801 /* This loop is strange in that it is executed in two different cases.
1802 The first is when X is already in the table. Then it is searching
1803 the RELATED_VALUE list of X's class (RELT). The second case is when
1804 X is not in the table. Then RELT points to a class for the related
1805 value.
1806
1807 Ensure that, whatever case we are in, we ignore classes that have
1808 the same value as X. */
1809
1810 if (rtx_equal_p (x, p->exp))
1811 q = 0;
1812 else
1813 for (q = p->first_same_value; q; q = q->next_same_value)
1814 if (GET_CODE (q->exp) == REG)
1815 break;
1816
1817 if (q)
1818 break;
1819
1820 p = p->related_value;
1821
1822 /* We went all the way around, so there is nothing to be found.
1823 Alternatively, perhaps RELT was in the table for some other reason
1824 and it has no related values recorded. */
1825 if (p == relt || p == 0)
1826 break;
1827 }
1828
1829 if (q == 0)
1830 return 0;
1831
1832 offset = (get_integer_term (x) - get_integer_term (p->exp));
1833 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1834 return plus_constant (q->exp, offset);
1835 }
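/* For example (illustrative values): if X is
   (const (plus (symbol_ref "table") (const_int 12))) and the table
   records that (const (plus (symbol_ref "table") (const_int 4))) is
   equivalent to (reg 65), the result is (plus (reg 65) (const_int 8)),
   typically cheaper than materializing X from scratch.  */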
1836 \f
1837 /* Hash an rtx. We are careful to make sure the value is never negative.
1838 Equivalent registers hash identically.
1839 MODE is used in hashing for CONST_INTs only;
1840 otherwise the mode of X is used.
1841
1842 Store 1 in do_not_record if any subexpression is volatile.
1843
1844 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1845 which does not have the RTX_UNCHANGING_P bit set.
1846 In this case, also store 1 in hash_arg_in_struct
1847 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1848
1849 Note that cse_insn knows that the hash code of a MEM expression
1850 is just (int) MEM plus the hash code of the address. */
1851
1852 static unsigned
1853 canon_hash (x, mode)
1854 rtx x;
1855 enum machine_mode mode;
1856 {
1857 register int i, j;
1858 register unsigned hash = 0;
1859 register enum rtx_code code;
1860 register char *fmt;
1861
1862 /* repeat is used to turn tail-recursion into iteration. */
1863 repeat:
1864 if (x == 0)
1865 return hash;
1866
1867 code = GET_CODE (x);
1868 switch (code)
1869 {
1870 case REG:
1871 {
1872 register int regno = REGNO (x);
1873
1874 /* On some machines, we can't record any non-fixed hard register,
1875 because extending its life will cause reload problems. We
1876 consider ap, fp, and sp to be fixed for this purpose.
1877 On all machines, we can't record any global registers. */
1878
1879 if (regno < FIRST_PSEUDO_REGISTER
1880 && (global_regs[regno]
1881 #ifdef SMALL_REGISTER_CLASSES
1882 || (! fixed_regs[regno]
1883 && regno != FRAME_POINTER_REGNUM
1884 && regno != HARD_FRAME_POINTER_REGNUM
1885 && regno != ARG_POINTER_REGNUM
1886 && regno != STACK_POINTER_REGNUM)
1887 #endif
1888 ))
1889 {
1890 do_not_record = 1;
1891 return 0;
1892 }
1893 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1894 return hash;
1895 }
1896
1897 case CONST_INT:
1898 {
1899 unsigned HOST_WIDE_INT tem = INTVAL (x);
1900 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1901 return hash;
1902 }
1903
1904 case CONST_DOUBLE:
1905 /* This is like the general case, except that it only counts
1906 the integers representing the constant. */
1907 hash += (unsigned) code + (unsigned) GET_MODE (x);
1908 if (GET_MODE (x) != VOIDmode)
1909 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1910 {
1911 unsigned tem = XINT (x, i);
1912 hash += tem;
1913 }
1914 else
1915 hash += ((unsigned) CONST_DOUBLE_LOW (x)
1916 + (unsigned) CONST_DOUBLE_HIGH (x));
1917 return hash;
1918
1919 /* Assume there is only one rtx object for any given label. */
1920 case LABEL_REF:
1921 hash
1922 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1923 return hash;
1924
1925 case SYMBOL_REF:
1926 hash
1927 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1928 return hash;
1929
1930 case MEM:
1931 if (MEM_VOLATILE_P (x))
1932 {
1933 do_not_record = 1;
1934 return 0;
1935 }
1936 if (! RTX_UNCHANGING_P (x))
1937 {
1938 hash_arg_in_memory = 1;
1939 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1940 }
1941 /* Now that we have already found this special case,
1942 might as well speed it up as much as possible. */
1943 hash += (unsigned) MEM;
1944 x = XEXP (x, 0);
1945 goto repeat;
1946
1947 case PRE_DEC:
1948 case PRE_INC:
1949 case POST_DEC:
1950 case POST_INC:
1951 case PC:
1952 case CC0:
1953 case CALL:
1954 case UNSPEC_VOLATILE:
1955 do_not_record = 1;
1956 return 0;
1957
1958 case ASM_OPERANDS:
1959 if (MEM_VOLATILE_P (x))
1960 {
1961 do_not_record = 1;
1962 return 0;
1963 }
1964 }
1965
1966 i = GET_RTX_LENGTH (code) - 1;
1967 hash += (unsigned) code + (unsigned) GET_MODE (x);
1968 fmt = GET_RTX_FORMAT (code);
1969 for (; i >= 0; i--)
1970 {
1971 if (fmt[i] == 'e')
1972 {
1973 rtx tem = XEXP (x, i);
1974
1975 /* If we are about to do the last recursive call
1976 needed at this level, change it into iteration.
1977 This function is called enough to be worth it. */
1978 if (i == 0)
1979 {
1980 x = tem;
1981 goto repeat;
1982 }
1983 hash += canon_hash (tem, 0);
1984 }
1985 else if (fmt[i] == 'E')
1986 for (j = 0; j < XVECLEN (x, i); j++)
1987 hash += canon_hash (XVECEXP (x, i, j), 0);
1988 else if (fmt[i] == 's')
1989 {
1990 register unsigned char *p = (unsigned char *) XSTR (x, i);
1991 if (p)
1992 while (*p)
1993 hash += *p++;
1994 }
1995 else if (fmt[i] == 'i')
1996 {
1997 register unsigned tem = XINT (x, i);
1998 hash += tem;
1999 }
2000 else
2001 abort ();
2002 }
2003 return hash;
2004 }
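/* Two consequences of the scheme above, using hypothetical register
   numbers: since a REG hashes through reg_qty, (reg 66) and (reg 67)
   hash identically whenever both currently hold the same quantity;
   and since a MEM hashes as (unsigned) MEM plus the hash of its
   address, cse_insn can form the hash of (mem:SI (reg 66)) from an
   already-computed hash of (reg 66) without rescanning the address.  */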
2005
2006 /* Like canon_hash but with no side effects. */
2007
2008 static unsigned
2009 safe_hash (x, mode)
2010 rtx x;
2011 enum machine_mode mode;
2012 {
2013 int save_do_not_record = do_not_record;
2014 int save_hash_arg_in_memory = hash_arg_in_memory;
2015 int save_hash_arg_in_struct = hash_arg_in_struct;
2016 unsigned hash = canon_hash (x, mode);
2017 hash_arg_in_memory = save_hash_arg_in_memory;
2018 hash_arg_in_struct = save_hash_arg_in_struct;
2019 do_not_record = save_do_not_record;
2020 return hash;
2021 }
2022 \f
2023 /* Return 1 iff X and Y would canonicalize into the same thing,
2024 without actually constructing the canonicalization of either one.
2025 If VALIDATE is nonzero,
2026 we assume X is an expression being processed from the rtl
2027 and Y was found in the hash table. We check register refs
2028 in Y for being marked as valid.
2029
2030 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2031 that is known to be in the register. Ordinarily, we don't allow them
2032 to match, because letting them match would cause unpredictable results
2033 in all the places that search a hash table chain for an equivalent
2034 for a given value. A possible equivalent that has different structure
2035 has its hash code computed from different data. Whether the hash code
2036 is the same as that of the given value is pure luck. */
2037
2038 static int
2039 exp_equiv_p (x, y, validate, equal_values)
2040 rtx x, y;
2041 int validate;
2042 int equal_values;
2043 {
2044 register int i, j;
2045 register enum rtx_code code;
2046 register char *fmt;
2047
2048 /* Note: it is incorrect to assume an expression is equivalent to itself
2049 if VALIDATE is nonzero. */
2050 if (x == y && !validate)
2051 return 1;
2052 if (x == 0 || y == 0)
2053 return x == y;
2054
2055 code = GET_CODE (x);
2056 if (code != GET_CODE (y))
2057 {
2058 if (!equal_values)
2059 return 0;
2060
2061 /* If X is a constant and Y is a register or vice versa, they may be
2062 equivalent. We only have to validate if Y is a register. */
2063 if (CONSTANT_P (x) && GET_CODE (y) == REG
2064 && REGNO_QTY_VALID_P (REGNO (y))
2065 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2066 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2067 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2068 return 1;
2069
2070 if (CONSTANT_P (y) && code == REG
2071 && REGNO_QTY_VALID_P (REGNO (x))
2072 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2073 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2074 return 1;
2075
2076 return 0;
2077 }
2078
2079 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2080 if (GET_MODE (x) != GET_MODE (y))
2081 return 0;
2082
2083 switch (code)
2084 {
2085 case PC:
2086 case CC0:
2087 return x == y;
2088
2089 case CONST_INT:
2090 return INTVAL (x) == INTVAL (y);
2091
2092 case LABEL_REF:
2093 return XEXP (x, 0) == XEXP (y, 0);
2094
2095 case SYMBOL_REF:
2096 return XSTR (x, 0) == XSTR (y, 0);
2097
2098 case REG:
2099 {
2100 int regno = REGNO (y);
2101 int endregno
2102 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2103 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2104 int i;
2105
2106 /* If the quantities are not the same, the expressions are not
2107 equivalent. If they are and we are not to validate, they
2108 are equivalent. Otherwise, ensure all regs are up-to-date. */
2109
2110 if (reg_qty[REGNO (x)] != reg_qty[regno])
2111 return 0;
2112
2113 if (! validate)
2114 return 1;
2115
2116 for (i = regno; i < endregno; i++)
2117 if (reg_in_table[i] != reg_tick[i])
2118 return 0;
2119
2120 return 1;
2121 }
2122
2123 /* For commutative operations, check both orders. */
2124 case PLUS:
2125 case MULT:
2126 case AND:
2127 case IOR:
2128 case XOR:
2129 case NE:
2130 case EQ:
2131 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2132 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2133 validate, equal_values))
2134 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2135 validate, equal_values)
2136 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2137 validate, equal_values)));
2138 }
2139
2140 /* Compare the elements. If any pair of corresponding elements
2141 fails to match, return 0 for the whole thing. */
2142
2143 fmt = GET_RTX_FORMAT (code);
2144 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2145 {
2146 switch (fmt[i])
2147 {
2148 case 'e':
2149 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2150 return 0;
2151 break;
2152
2153 case 'E':
2154 if (XVECLEN (x, i) != XVECLEN (y, i))
2155 return 0;
2156 for (j = 0; j < XVECLEN (x, i); j++)
2157 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2158 validate, equal_values))
2159 return 0;
2160 break;
2161
2162 case 's':
2163 if (strcmp (XSTR (x, i), XSTR (y, i)))
2164 return 0;
2165 break;
2166
2167 case 'i':
2168 if (XINT (x, i) != XINT (y, i))
2169 return 0;
2170 break;
2171
2172 case 'w':
2173 if (XWINT (x, i) != XWINT (y, i))
2174 return 0;
2175 break;
2176
2177 case '0':
2178 break;
2179
2180 default:
2181 abort ();
2182 }
2183 }
2184
2185 return 1;
2186 }
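/* For instance (hypothetical regs): (plus:SI (reg 66) (const_int 4))
   and (plus:SI (const_int 4) (reg 66)) are equivalent through the
   commutative case above, while (mult:SI (reg 66) (reg 67)) never
   matches (mult:HI (reg 66) (reg 67)) because the modes differ.  */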
2187 \f
2188 /* Return 1 iff any subexpression of X matches Y.
2189 Here we do not require that X or Y be valid (for registers referred to)
2190 for being in the hash table. */
2191
2192 static int
2193 refers_to_p (x, y)
2194 rtx x, y;
2195 {
2196 register int i;
2197 register enum rtx_code code;
2198 register char *fmt;
2199
2200 repeat:
2201 if (x == y)
2202 return 1;
2203 if (x == 0 || y == 0)
2204 return 0;
2205
2206 code = GET_CODE (x);
2207 /* If X as a whole has the same code as Y, they may match.
2208 If so, return 1. */
2209 if (code == GET_CODE (y))
2210 {
2211 if (exp_equiv_p (x, y, 0, 1))
2212 return 1;
2213 }
2214
2215 /* X does not match, so try its subexpressions. */
2216
2217 fmt = GET_RTX_FORMAT (code);
2218 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2219 if (fmt[i] == 'e')
2220 {
2221 if (i == 0)
2222 {
2223 x = XEXP (x, 0);
2224 goto repeat;
2225 }
2226 else
2227 if (refers_to_p (XEXP (x, i), y))
2228 return 1;
2229 }
2230 else if (fmt[i] == 'E')
2231 {
2232 int j;
2233 for (j = 0; j < XVECLEN (x, i); j++)
2234 if (refers_to_p (XVECEXP (x, i, j), y))
2235 return 1;
2236 }
2237
2238 return 0;
2239 }
2240 \f
2241 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2242 set PBASE, PSTART, and PEND which correspond to the base of the address,
2243 the starting offset, and ending offset respectively.
2244
2245 ADDR is known to be a nonvarying address. */
2246
2247 /* ??? Despite what the comments say, this function is in fact frequently
2248 passed varying addresses. This does not appear to cause any problems. */
2249
2250 static void
2251 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2252 rtx addr;
2253 int size;
2254 rtx *pbase;
2255 HOST_WIDE_INT *pstart, *pend;
2256 {
2257 rtx base;
2258 HOST_WIDE_INT start, end;
2259
2260 base = addr;
2261 start = 0;
2262 end = 0;
2263
2264 /* Registers with nonvarying addresses usually have constant equivalents;
2265 but the frame pointer register is also possible. */
2266 if (GET_CODE (base) == REG
2267 && qty_const != 0
2268 && REGNO_QTY_VALID_P (REGNO (base))
2269 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2270 && qty_const[reg_qty[REGNO (base)]] != 0)
2271 base = qty_const[reg_qty[REGNO (base)]];
2272 else if (GET_CODE (base) == PLUS
2273 && GET_CODE (XEXP (base, 1)) == CONST_INT
2274 && GET_CODE (XEXP (base, 0)) == REG
2275 && qty_const != 0
2276 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2277 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2278 == GET_MODE (XEXP (base, 0)))
2279 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2280 {
2281 start = INTVAL (XEXP (base, 1));
2282 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2283 }
2284 /* This can happen as the result of virtual register instantiation,
2285 if the initial offset is too large to be a valid address. */
2286 else if (GET_CODE (base) == PLUS
2287 && GET_CODE (XEXP (base, 0)) == REG
2288 && GET_CODE (XEXP (base, 1)) == REG
2289 && qty_const != 0
2290 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2291 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2292 == GET_MODE (XEXP (base, 0)))
2293 && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
2294 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2295 && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
2296 == GET_MODE (XEXP (base, 1)))
2297 && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
2298 {
2299 rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
2300 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2301
2302 /* One of the two values must be a constant. */
2303 if (GET_CODE (base) != CONST_INT)
2304 {
2305 if (GET_CODE (tem) != CONST_INT)
2306 abort ();
2307 start = INTVAL (tem);
2308 }
2309 else
2310 {
2311 start = INTVAL (base);
2312 base = tem;
2313 }
2314 }
2315
2316 /* Handle everything that we can find inside an address that has been
2317 viewed as constant. */
2318
2319 while (1)
2320 {
2321 /* If no part of this switch does a "continue", the code outside
2322 will exit this loop. */
2323
2324 switch (GET_CODE (base))
2325 {
2326 case LO_SUM:
2327 /* By definition, operand1 of a LO_SUM is the associated constant
2328 address. Use the associated constant address as the base
2329 instead. */
2330 base = XEXP (base, 1);
2331 continue;
2332
2333 case CONST:
2334 /* Strip off CONST. */
2335 base = XEXP (base, 0);
2336 continue;
2337
2338 case PLUS:
2339 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2340 {
2341 start += INTVAL (XEXP (base, 1));
2342 base = XEXP (base, 0);
2343 continue;
2344 }
2345 break;
2346
2347 case AND:
2348 /* Handle the case of an AND which is the negative of a power of
2349 two. This is used to represent unaligned memory operations. */
2350 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2351 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2352 {
2353 set_nonvarying_address_components (XEXP (base, 0), size,
2354 pbase, pstart, pend);
2355
2356 /* Assume the worst misalignment. START is affected, but not
2357 END, so compensate by adjusting SIZE. Don't lose any
2358 constant we already had. */
2359
2360 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2361 start += *pstart - INTVAL (XEXP (base, 1)) - 1;
2362 base = *pbase;
2363 }
2364 break;
2365 }
2366
2367 break;
2368 }
2369
2370 if (GET_CODE (base) == CONST_INT)
2371 {
2372 start += INTVAL (base);
2373 base = const0_rtx;
2374 }
2375
2376 end = start + size;
2377
2378 /* Set the return values. */
2379 *pbase = base;
2380 *pstart = start;
2381 *pend = end;
2382 }
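/* A worked example (illustrative): for a 4-byte SImode reference to
   (plus (reg fp) (const_int 8)) where the frame pointer has no
   constant equivalent, the loop above peels the PLUS, giving
   BASE = (reg fp), START = 8, END = 12.  If instead (reg 70) is
   currently known to equal (symbol_ref "buf"), the same offsets are
   produced with BASE = (symbol_ref "buf").  */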
2383
2384 /* Return 1 iff any subexpression of X refers to memory
2385 at an address of BASE plus some offset
2386 such that any of the bytes' offsets fall between START (inclusive)
2387 and END (exclusive).
2388
2389 The value is undefined if X is a varying address (as determined by
2390 cse_rtx_addr_varies_p). This function is not used in such cases.
2391
2392 When used in the cse pass, `qty_const' is nonzero, and it is used
2393 to treat an address that is a register with a known constant value
2394 as if it were that constant value.
2395 In the loop pass, `qty_const' is zero, so this is not done. */
2396
2397 static int
2398 refers_to_mem_p (x, base, start, end)
2399 rtx x, base;
2400 HOST_WIDE_INT start, end;
2401 {
2402 register HOST_WIDE_INT i;
2403 register enum rtx_code code;
2404 register char *fmt;
2405
2406 repeat:
2407 if (x == 0)
2408 return 0;
2409
2410 code = GET_CODE (x);
2411 if (code == MEM)
2412 {
2413 register rtx addr = XEXP (x, 0); /* Get the address. */
2414 rtx mybase;
2415 HOST_WIDE_INT mystart, myend;
2416
2417 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2418 &mybase, &mystart, &myend);
2419
2421 /* refers_to_mem_p is never called with varying addresses.
2422 If the base addresses are not equal, there is no chance
2423 of the memory addresses conflicting. */
2424 if (! rtx_equal_p (mybase, base))
2425 return 0;
2426
2427 return myend > start && mystart < end;
2428 }
2429
2430 /* X does not match, so try its subexpressions. */
2431
2432 fmt = GET_RTX_FORMAT (code);
2433 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2434 if (fmt[i] == 'e')
2435 {
2436 if (i == 0)
2437 {
2438 x = XEXP (x, 0);
2439 goto repeat;
2440 }
2441 else
2442 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2443 return 1;
2444 }
2445 else if (fmt[i] == 'E')
2446 {
2447 int j;
2448 for (j = 0; j < XVECLEN (x, i); j++)
2449 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2450 return 1;
2451 }
2452
2453 return 0;
2454 }
2455
2456 /* Nonzero if X refers to memory at a varying address;
2457 except that a register which has at the moment a known constant value
2458 isn't considered variable. */
2459
2460 static int
2461 cse_rtx_addr_varies_p (x)
2462 rtx x;
2463 {
2464 /* We need not check for X and the equivalence class being of the same
2465 mode because if X is equivalent to a constant in some mode, it
2466 doesn't vary in any mode. */
2467
2468 if (GET_CODE (x) == MEM
2469 && GET_CODE (XEXP (x, 0)) == REG
2470 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2471 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2472 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2473 return 0;
2474
2475 if (GET_CODE (x) == MEM
2476 && GET_CODE (XEXP (x, 0)) == PLUS
2477 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2478 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2479 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2480 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2481 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2482 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2483 return 0;
2484
2485 /* This can happen as the result of virtual register instantiation, if
2486 the initial constant is too large to be a valid address. This gives
2487 us a three instruction sequence, load large offset into a register,
2488 load fp minus a constant into a register, then a MEM which is the
2489 sum of the two `constant' registers. */
2490 if (GET_CODE (x) == MEM
2491 && GET_CODE (XEXP (x, 0)) == PLUS
2492 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2493 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
2494 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2495 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2496 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2497 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
2498 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
2499 && (GET_MODE (XEXP (XEXP (x, 0), 1))
2500 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2501 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2502 return 0;
2503
2504 return rtx_addr_varies_p (x);
2505 }
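/* Thus (mem:SI (reg 70)) counts as nonvarying while (reg 70) is known
   to equal, say, (symbol_ref "buf") (a hypothetical example); only
   addresses with no known constant equivalent fall through to the
   generic rtx_addr_varies_p test.  */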
2506 \f
2507 /* Canonicalize an expression:
2508 replace each register reference inside it
2509 with the "oldest" equivalent register.
2510
2511 If INSN is non-zero and we are replacing a pseudo with a hard register
2512 or vice versa, validate_change is used to ensure that INSN remains valid
2513 after we make our substitution. The calls are made with IN_GROUP non-zero
2514 so apply_change_group must be called upon the outermost return from this
2515 function (unless INSN is zero). The result of apply_change_group can
2516 generally be discarded since the changes we are making are optional. */
2517
2518 static rtx
2519 canon_reg (x, insn)
2520 rtx x;
2521 rtx insn;
2522 {
2523 register int i;
2524 register enum rtx_code code;
2525 register char *fmt;
2526
2527 if (x == 0)
2528 return x;
2529
2530 code = GET_CODE (x);
2531 switch (code)
2532 {
2533 case PC:
2534 case CC0:
2535 case CONST:
2536 case CONST_INT:
2537 case CONST_DOUBLE:
2538 case SYMBOL_REF:
2539 case LABEL_REF:
2540 case ADDR_VEC:
2541 case ADDR_DIFF_VEC:
2542 return x;
2543
2544 case REG:
2545 {
2546 register int first;
2547
2548 /* Never replace a hard reg, because hard regs can appear
2549 in more than one machine mode, and we must preserve the mode
2550 of each occurrence. Also, some hard regs appear in
2551 MEMs that are shared and mustn't be altered. Don't try to
2552 replace any reg that maps to a reg of class NO_REGS. */
2553 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2554 || ! REGNO_QTY_VALID_P (REGNO (x)))
2555 return x;
2556
2557 first = qty_first_reg[reg_qty[REGNO (x)]];
2558 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2559 : REGNO_REG_CLASS (first) == NO_REGS ? x
2560 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2561 }
2562 }
2563
2564 fmt = GET_RTX_FORMAT (code);
2565 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2566 {
2567 register int j;
2568
2569 if (fmt[i] == 'e')
2570 {
2571 rtx new = canon_reg (XEXP (x, i), insn);
2572
2573 /* If replacing pseudo with hard reg or vice versa, ensure the
2574 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2575 if (insn != 0 && new != 0
2576 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2577 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2578 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2579 || insn_n_dups[recog_memoized (insn)] > 0))
2580 validate_change (insn, &XEXP (x, i), new, 1);
2581 else
2582 XEXP (x, i) = new;
2583 }
2584 else if (fmt[i] == 'E')
2585 for (j = 0; j < XVECLEN (x, i); j++)
2586 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2587 }
2588
2589 return x;
2590 }
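/* Illustration (hypothetical regs): if pseudos 66, 67 and 68 all hold
   the same quantity and 66 is the oldest, canon_reg rewrites
   (plus:SI (reg 67) (reg 68)) as (plus:SI (reg 66) (reg 66)), so that
   later lookups see one canonical form no matter which copies the
   insn originally mentioned.  */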
2591 \f
2592 /* LOC is a location within INSN that is an operand address (the contents of
2593 a MEM). Find the best equivalent address to use that is valid for this
2594 insn.
2595
2596 On most CISC machines, complicated address modes are costly, and rtx_cost
2597 is a good approximation for that cost. However, most RISC machines have
2598 only a few (usually only one) memory reference formats. If an address is
2599 valid at all, it is often just as cheap as any other address. Hence, for
2600 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2601 costs of various addresses. For two addresses of equal cost, choose the one
2602 with the highest `rtx_cost' value as that has the potential of eliminating
2603 the most insns. For equal costs, we choose the first in the equivalence
2604 class. Note that we ignore the fact that pseudo registers are cheaper
2605 than hard registers here because we would also prefer the pseudo registers.
2606 */
2607
2608 static void
2609 find_best_addr (insn, loc)
2610 rtx insn;
2611 rtx *loc;
2612 {
2613 struct table_elt *elt, *p;
2614 rtx addr = *loc;
2615 int our_cost;
2616 int found_better = 1;
2617 int save_do_not_record = do_not_record;
2618 int save_hash_arg_in_memory = hash_arg_in_memory;
2619 int save_hash_arg_in_struct = hash_arg_in_struct;
2620 int addr_volatile;
2621 int regno;
2622 unsigned hash;
2623
2624 /* Do not try to replace constant addresses or addresses of local and
2625 argument slots. These MEM expressions are made only once and inserted
2626 in many instructions, as well as being used to control symbol table
2627 output. It is not safe to clobber them.
2628
2629 There are some uncommon cases where the address is already in a register
2630 for some reason, but we cannot take advantage of that because we have
2631 no easy way to unshare the MEM. In addition, looking up all stack
2632 addresses is costly. */
2633 if ((GET_CODE (addr) == PLUS
2634 && GET_CODE (XEXP (addr, 0)) == REG
2635 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2636 && (regno = REGNO (XEXP (addr, 0)),
2637 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2638 || regno == ARG_POINTER_REGNUM))
2639 || (GET_CODE (addr) == REG
2640 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2641 || regno == HARD_FRAME_POINTER_REGNUM
2642 || regno == ARG_POINTER_REGNUM))
2643 || CONSTANT_ADDRESS_P (addr))
2644 return;
2645
2646 /* If this address is not simply a register, try to fold it. This will
2647 sometimes simplify the expression. Many simplifications
2648 will not be valid, but some, usually applying the associative rule, will
2649 be valid and produce better code. */
2650 if (GET_CODE (addr) != REG
2651 && validate_change (insn, loc, fold_rtx (addr, insn), 0))
2652 addr = *loc;
2653
2654 /* If this address is not in the hash table, we can't look for equivalences
2655 of the whole address. Also, ignore if volatile. */
2656
2657 do_not_record = 0;
2658 hash = HASH (addr, Pmode);
2659 addr_volatile = do_not_record;
2660 do_not_record = save_do_not_record;
2661 hash_arg_in_memory = save_hash_arg_in_memory;
2662 hash_arg_in_struct = save_hash_arg_in_struct;
2663
2664 if (addr_volatile)
2665 return;
2666
2667 elt = lookup (addr, hash, Pmode);
2668
2669 #ifndef ADDRESS_COST
2670 if (elt)
2671 {
2672 our_cost = elt->cost;
2673
2674 /* Find the lowest cost below ours that works. */
2675 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2676 if (elt->cost < our_cost
2677 && (GET_CODE (elt->exp) == REG
2678 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2679 && validate_change (insn, loc,
2680 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2681 return;
2682 }
2683 #else
2684
2685 if (elt)
2686 {
2687 /* We need to find the best (under the criteria documented above) entry
2688 in the class that is valid. We use the `flag' field to indicate
2689 choices that were invalid and iterate until we can't find a better
2690 one that hasn't already been tried. */
2691
2692 for (p = elt->first_same_value; p; p = p->next_same_value)
2693 p->flag = 0;
2694
2695 while (found_better)
2696 {
2697 int best_addr_cost = ADDRESS_COST (*loc);
2698 int best_rtx_cost = (elt->cost + 1) >> 1;
2699 struct table_elt *best_elt = elt;
2700
2701 found_better = 0;
2702 for (p = elt->first_same_value; p; p = p->next_same_value)
2703 if (! p->flag
2704 && (GET_CODE (p->exp) == REG
2705 || exp_equiv_p (p->exp, p->exp, 1, 0))
2706 && (ADDRESS_COST (p->exp) < best_addr_cost
2707 || (ADDRESS_COST (p->exp) == best_addr_cost
2708 && (p->cost + 1) >> 1 > best_rtx_cost)))
2709 {
2710 found_better = 1;
2711 best_addr_cost = ADDRESS_COST (p->exp);
2712 best_rtx_cost = (p->cost + 1) >> 1;
2713 best_elt = p;
2714 }
2715
2716 if (found_better)
2717 {
2718 if (validate_change (insn, loc,
2719 canon_reg (copy_rtx (best_elt->exp),
2720 NULL_RTX), 0))
2721 return;
2722 else
2723 best_elt->flag = 1;
2724 }
2725 }
2726 }
2727
2728 /* If the address is a binary operation with the first operand a register
2729 and the second a constant, do the same as above, but looking for
2730 equivalences of the register. Then try to simplify before checking for
2731 the best address to use. This catches a few cases: First is when we
2732 have REG+const and the register is another REG+const. We can often merge
2733 the constants and eliminate one insn and one register. It may also be
2734 that a machine has a cheap REG+REG+const. Finally, this improves the
2735 code on the Alpha for unaligned byte stores. */
2736
2737 if (flag_expensive_optimizations
2738 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2739 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2740 && GET_CODE (XEXP (*loc, 0)) == REG
2741 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2742 {
2743 rtx c = XEXP (*loc, 1);
2744
2745 do_not_record = 0;
2746 hash = HASH (XEXP (*loc, 0), Pmode);
2747 do_not_record = save_do_not_record;
2748 hash_arg_in_memory = save_hash_arg_in_memory;
2749 hash_arg_in_struct = save_hash_arg_in_struct;
2750
2751 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2752 if (elt == 0)
2753 return;
2754
2755 /* We need to find the best (under the criteria documented above) entry
2756 in the class that is valid. We use the `flag' field to indicate
2757 choices that were invalid and iterate until we can't find a better
2758 one that hasn't already been tried. */
2759
2760 for (p = elt->first_same_value; p; p = p->next_same_value)
2761 p->flag = 0;
2762
2763 while (found_better)
2764 {
2765 int best_addr_cost = ADDRESS_COST (*loc);
2766 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2767 struct table_elt *best_elt = elt;
2768 rtx best_rtx = *loc;
2769 int count;
2770
2771 /* In the worst case this is an O(n^2) algorithm, so limit our search
2772 to the first 32 elements on the list. This avoids trouble
2773 compiling code with very long basic blocks that can easily
2774 call cse_gen_binary so many times that we run out of memory. */
2775
2776 found_better = 0;
2777 for (p = elt->first_same_value, count = 0;
2778 p && count < 32;
2779 p = p->next_same_value, count++)
2780 if (! p->flag
2781 && (GET_CODE (p->exp) == REG
2782 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2783 {
2784 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2785
2786 if ((ADDRESS_COST (new) < best_addr_cost
2787 || (ADDRESS_COST (new) == best_addr_cost
2788 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2789 {
2790 found_better = 1;
2791 best_addr_cost = ADDRESS_COST (new);
2792 best_rtx_cost = (COST (new) + 1) >> 1;
2793 best_elt = p;
2794 best_rtx = new;
2795 }
2796 }
2797
2798 if (found_better)
2799 {
2800 if (validate_change (insn, loc,
2801 canon_reg (copy_rtx (best_rtx),
2802 NULL_RTX), 0))
2803 return;
2804 else
2805 best_elt->flag = 1;
2806 }
2807 }
2808 }
2809 #endif
2810 }
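/* The selection rule used above can be summarized as follows (a
   sketch for the ADDRESS_COST configuration, using a hypothetical
   helper; the real loops additionally honor the `flag' retry bit):  */
#if 0
static int
better_addr_p (rtx a, rtx b)
{
  /* A lower address cost wins outright; on a tie, prefer the higher
     rtx cost, since replacing such an address can eliminate more
     insns.  */
  return (ADDRESS_COST (a) < ADDRESS_COST (b)
          || (ADDRESS_COST (a) == ADDRESS_COST (b)
              && rtx_cost (a, MEM) > rtx_cost (b, MEM)));
}
#endif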
2811 \f
2812 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2813 operation (EQ, NE, GT, etc.), follow it back through the hash table to
2814 find what values are being compared.
2815
2816 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2817 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2818 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2819 compared to produce cc0.
2820
2821 The return value is the comparison operator: either CODE itself or the
2822 code corresponding to the inverse of the comparison.
2823
2824 static enum rtx_code
2825 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2826 enum rtx_code code;
2827 rtx *parg1, *parg2;
2828 enum machine_mode *pmode1, *pmode2;
2829 {
2830 rtx arg1, arg2;
2831
2832 arg1 = *parg1, arg2 = *parg2;
2833
2834 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2835
2836 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2837 {
2838 /* Set non-zero when we find something of interest. */
2839 rtx x = 0;
2840 int reverse_code = 0;
2841 struct table_elt *p = 0;
2842
2843 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2844 On machines with CC0, this is the only case that can occur, since
2845 fold_rtx will return the COMPARE or item being compared with zero
2846 when given CC0. */
2847
2848 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2849 x = arg1;
2850
2851 /* If ARG1 is a comparison operator and CODE is testing for
2852 STORE_FLAG_VALUE, get the inner arguments. */
2853
2854 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2855 {
2856 if (code == NE
2857 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2858 && code == LT && STORE_FLAG_VALUE == -1)
2859 #ifdef FLOAT_STORE_FLAG_VALUE
2860 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2861 && FLOAT_STORE_FLAG_VALUE < 0)
2862 #endif
2863 )
2864 x = arg1;
2865 else if (code == EQ
2866 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2867 && code == GE && STORE_FLAG_VALUE == -1)
2868 #ifdef FLOAT_STORE_FLAG_VALUE
2869 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2870 && FLOAT_STORE_FLAG_VALUE < 0)
2871 #endif
2872 )
2873 x = arg1, reverse_code = 1;
2874 }
2875
2876 /* ??? We could also check for
2877
2878 (ne (and (eq (...) (const_int 1))) (const_int 0))
2879
2880 and related forms, but let's wait until we see them occurring. */
2881
2882 if (x == 0)
2883 /* Look up ARG1 in the hash table and see if it has an equivalence
2884 that lets us see what is being compared. */
2885 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2886 GET_MODE (arg1));
2887 if (p) p = p->first_same_value;
2888
2889 for (; p; p = p->next_same_value)
2890 {
2891 enum machine_mode inner_mode = GET_MODE (p->exp);
2892
2893 /* If the entry isn't valid, skip it. */
2894 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2895 continue;
2896
2897 if (GET_CODE (p->exp) == COMPARE
2898 /* Another possibility is that this machine has a compare insn
2899 that includes the comparison code. In that case, ARG1 would
2900 be equivalent to a comparison operation that would set ARG1 to
2901 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2902 ORIG_CODE is the actual comparison being done; if it is an EQ,
2903 we must reverse ORIG_CODE. On machines with a negative value
2904 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2905 || ((code == NE
2906 || (code == LT
2907 && GET_MODE_CLASS (inner_mode) == MODE_INT
2908 && (GET_MODE_BITSIZE (inner_mode)
2909 <= HOST_BITS_PER_WIDE_INT)
2910 && (STORE_FLAG_VALUE
2911 & ((HOST_WIDE_INT) 1
2912 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2913 #ifdef FLOAT_STORE_FLAG_VALUE
2914 || (code == LT
2915 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2916 && FLOAT_STORE_FLAG_VALUE < 0)
2917 #endif
2918 )
2919 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2920 {
2921 x = p->exp;
2922 break;
2923 }
2924 else if ((code == EQ
2925 || (code == GE
2926 && GET_MODE_CLASS (inner_mode) == MODE_INT
2927 && (GET_MODE_BITSIZE (inner_mode)
2928 <= HOST_BITS_PER_WIDE_INT)
2929 && (STORE_FLAG_VALUE
2930 & ((HOST_WIDE_INT) 1
2931 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2932 #ifdef FLOAT_STORE_FLAG_VALUE
2933 || (code == GE
2934 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2935 && FLOAT_STORE_FLAG_VALUE < 0)
2936 #endif
2937 )
2938 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2939 {
2940 reverse_code = 1;
2941 x = p->exp;
2942 break;
2943 }
2944
2945 /* If this is fp + constant, the equivalent is a better operand since
2946 it may let us predict the value of the comparison. */
2947 else if (NONZERO_BASE_PLUS_P (p->exp))
2948 {
2949 arg1 = p->exp;
2950 continue;
2951 }
2952 }
2953
2954 /* If we didn't find a useful equivalence for ARG1, we are done.
2955 Otherwise, set up for the next iteration. */
2956 if (x == 0)
2957 break;
2958
2959 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
2960 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
2961 code = GET_CODE (x);
2962
2963 if (reverse_code)
2964 code = reverse_condition (code);
2965 }
2966
2967 /* Return our results. Return the modes from before fold_rtx
2968 because fold_rtx might produce const_int, and then it's too late. */
2969 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
2970 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
2971
2972 return code;
2973 }
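/* Example (hypothetical regs): with CODE == EQ,
   *PARG1 == (compare (reg 66) (const_int 10)) and
   *PARG2 == (const_int 0), the arguments become (reg 66) and
   (const_int 10) and EQ is returned unchanged.  Had (reg 66) instead
   been found equivalent to the stored flag (lt (reg 67) (reg 68)),
   the EQ would be reversed, so GE is returned with those operands.  */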
2974 \f
2975 /* Try to simplify a unary operation CODE whose output mode is to be
2976 MODE with input operand OP whose mode was originally OP_MODE.
2977 Return zero if no simplification can be made. */
2978
2979 rtx
2980 simplify_unary_operation (code, mode, op, op_mode)
2981 enum rtx_code code;
2982 enum machine_mode mode;
2983 rtx op;
2984 enum machine_mode op_mode;
2985 {
2986 register int width = GET_MODE_BITSIZE (mode);
2987
2988 /* The order of these tests is critical so that, for example, we don't
2989 check the wrong mode (input vs. output) for a conversion operation,
2990 such as FIX. At some point, this should be simplified. */
2991
2992 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
2993
2994 if (code == FLOAT && GET_MODE (op) == VOIDmode
2995 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
2996 {
2997 HOST_WIDE_INT hv, lv;
2998 REAL_VALUE_TYPE d;
2999
3000 if (GET_CODE (op) == CONST_INT)
3001 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3002 else
3003 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3004
3005 #ifdef REAL_ARITHMETIC
3006 REAL_VALUE_FROM_INT (d, lv, hv);
3007 #else
3008 if (hv < 0)
3009 {
3010 d = (double) (~ hv);
3011 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3012 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3013 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3014 d = (- d - 1.0);
3015 }
3016 else
3017 {
3018 d = (double) hv;
3019 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3020 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3021 d += (double) (unsigned HOST_WIDE_INT) lv;
3022 }
3023 #endif /* REAL_ARITHMETIC */
3024 d = real_value_truncate (mode, d);
3025 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3026 }
3027 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3028 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3029 {
3030 HOST_WIDE_INT hv, lv;
3031 REAL_VALUE_TYPE d;
3032
3033 if (GET_CODE (op) == CONST_INT)
3034 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3035 else
3036 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3037
3038 if (op_mode == VOIDmode)
3039 {
3040 /* We don't know how to interpret negative-looking numbers in
3041 this case, so don't try to fold those. */
3042 if (hv < 0)
3043 return 0;
3044 }
3045 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3046 ;
3047 else
3048 hv = 0, lv &= GET_MODE_MASK (op_mode);
3049
3050 #ifdef REAL_ARITHMETIC
3051 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv);
3052 #else
3053
3054 d = (double) (unsigned HOST_WIDE_INT) hv;
3055 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3056 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3057 d += (double) (unsigned HOST_WIDE_INT) lv;
3058 #endif /* REAL_ARITHMETIC */
3059 d = real_value_truncate (mode, d);
3060 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3061 }
3062 #endif
3063
3064 if (GET_CODE (op) == CONST_INT
3065 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3066 {
3067 register HOST_WIDE_INT arg0 = INTVAL (op);
3068 register HOST_WIDE_INT val;
3069
3070 switch (code)
3071 {
3072 case NOT:
3073 val = ~ arg0;
3074 break;
3075
3076 case NEG:
3077 val = - arg0;
3078 break;
3079
3080 case ABS:
3081 val = (arg0 >= 0 ? arg0 : - arg0);
3082 break;
3083
3084 case FFS:
3085 /* Don't use ffs here. Instead, get low order bit and then its
3086 number. If arg0 is zero, this will return 0, as desired. */
3087 arg0 &= GET_MODE_MASK (mode);
3088 val = exact_log2 (arg0 & (- arg0)) + 1;
3089 break;
3090
3091 case TRUNCATE:
3092 val = arg0;
3093 break;
3094
3095 case ZERO_EXTEND:
3096 if (op_mode == VOIDmode)
3097 op_mode = mode;
3098 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3099 {
3100 /* If we were really extending the mode,
3101 we would have to distinguish between zero-extension
3102 and sign-extension. */
3103 if (width != GET_MODE_BITSIZE (op_mode))
3104 abort ();
3105 val = arg0;
3106 }
3107 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3108 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3109 else
3110 return 0;
3111 break;
3112
3113 case SIGN_EXTEND:
3114 if (op_mode == VOIDmode)
3115 op_mode = mode;
3116 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3117 {
3118 /* If we were really extending the mode,
3119 we would have to distinguish between zero-extension
3120 and sign-extension. */
3121 if (width != GET_MODE_BITSIZE (op_mode))
3122 abort ();
3123 val = arg0;
3124 }
3125 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3126 {
3127 val
3128 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3129 if (val
3130 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3131 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3132 }
3133 else
3134 return 0;
3135 break;
3136
3137 case SQRT:
3138 return 0;
3139
3140 default:
3141 abort ();
3142 }
3143
3144 /* Clear the bits that don't belong in our mode,
3145 unless they and our sign bit are all one.
3146 So we get either a reasonable negative value or a reasonable
3147 unsigned value for this mode. */
3148 if (width < HOST_BITS_PER_WIDE_INT
3149 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3150 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3151 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3152
3153 return GEN_INT (val);
3154 }
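/* Worked example of the CONST_INT arm above:
   (sign_extend:SI (const_int 128)) with OP_MODE == QImode masks ARG0
   down to 0x80, sees the QImode sign bit set, subtracts 0x100 to get
   -128, and returns (const_int -128); the final bit-clearing step is
   skipped because the excess bits all equal the sign bit.  */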
3155
3156 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3157 for a DImode operation on a CONST_INT. */
3158 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
3159 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3160 {
3161 HOST_WIDE_INT l1, h1, lv, hv;
3162
3163 if (GET_CODE (op) == CONST_DOUBLE)
3164 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3165 else
3166 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3167
3168 switch (code)
3169 {
3170 case NOT:
3171 lv = ~ l1;
3172 hv = ~ h1;
3173 break;
3174
3175 case NEG:
3176 neg_double (l1, h1, &lv, &hv);
3177 break;
3178
3179 case ABS:
3180 if (h1 < 0)
3181 neg_double (l1, h1, &lv, &hv);
3182 else
3183 lv = l1, hv = h1;
3184 break;
3185
3186 case FFS:
3187 hv = 0;
3188 if (l1 == 0)
3189 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3190 else
3191 lv = exact_log2 (l1 & (-l1)) + 1;
3192 break;
3193
3194 case TRUNCATE:
3195 /* This is just a change-of-mode, so do nothing. */
3196 lv = l1, hv = h1;
3197 break;
3198
3199 case ZERO_EXTEND:
3200 if (op_mode == VOIDmode
3201 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3202 return 0;
3203
3204 hv = 0;
3205 lv = l1 & GET_MODE_MASK (op_mode);
3206 break;
3207
3208 case SIGN_EXTEND:
3209 if (op_mode == VOIDmode
3210 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3211 return 0;
3212 else
3213 {
3214 lv = l1 & GET_MODE_MASK (op_mode);
3215 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3216 && (lv & ((HOST_WIDE_INT) 1
3217 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3218 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3219
3220 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3221 }
3222 break;
3223
3224 case SQRT:
3225 return 0;
3226
3227 default:
3228 return 0;
3229 }
3230
3231 return immed_double_const (lv, hv, mode);
3232 }
3233
3234 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3235 else if (GET_CODE (op) == CONST_DOUBLE
3236 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3237 {
3238 REAL_VALUE_TYPE d;
3239 jmp_buf handler;
3240 rtx x;
3241
3242 if (setjmp (handler))
3243 /* There used to be a warning here, but that is inadvisable.
3244 People may want to cause traps, and the natural way
3245 to do it should not get a warning. */
3246 return 0;
3247
3248 set_float_handler (handler);
3249
3250 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3251
3252 switch (code)
3253 {
3254 case NEG:
3255 d = REAL_VALUE_NEGATE (d);
3256 break;
3257
3258 case ABS:
3259 if (REAL_VALUE_NEGATIVE (d))
3260 d = REAL_VALUE_NEGATE (d);
3261 break;
3262
3263 case FLOAT_TRUNCATE:
3264 d = real_value_truncate (mode, d);
3265 break;
3266
3267 case FLOAT_EXTEND:
3268 /* All this does is change the mode. */
3269 break;
3270
3271 case FIX:
3272 d = REAL_VALUE_RNDZINT (d);
3273 break;
3274
3275 case UNSIGNED_FIX:
3276 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3277 break;
3278
3279 case SQRT:
3280 return 0;
3281
3282 default:
3283 abort ();
3284 }
3285
3286 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3287 set_float_handler (NULL_PTR);
3288 return x;
3289 }
3290
3291 else if (GET_CODE (op) == CONST_DOUBLE
3292 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3293 && GET_MODE_CLASS (mode) == MODE_INT
3294 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3295 {
3296 REAL_VALUE_TYPE d;
3297 jmp_buf handler;
3298 HOST_WIDE_INT val;
3299
3300 if (setjmp (handler))
3301 return 0;
3302
3303 set_float_handler (handler);
3304
3305 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3306
3307 switch (code)
3308 {
3309 case FIX:
3310 val = REAL_VALUE_FIX (d);
3311 break;
3312
3313 case UNSIGNED_FIX:
3314 val = REAL_VALUE_UNSIGNED_FIX (d);
3315 break;
3316
3317 default:
3318 abort ();
3319 }
3320
3321 set_float_handler (NULL_PTR);
3322
3323 /* Clear the bits that don't belong in our mode,
3324 unless they and our sign bit are all one.
3325 So we get either a reasonable negative value or a reasonable
3326 unsigned value for this mode. */
3327 if (width < HOST_BITS_PER_WIDE_INT
3328 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3329 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3330 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3331
3332 /* If this would be an entire word for the target, but is not for
3333 the host, then sign-extend on the host so that the number will look
3334 the same way on the host as it would on the target.
3335
3336 For example, when building a 64 bit alpha hosted 32 bit sparc
3337 targeted compiler, then we want the 32 bit unsigned value -1 to be
3338 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
3339 The latter confuses the sparc backend. */
3340
3341 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
3342 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
3343 val |= ((HOST_WIDE_INT) (-1) << width);
3344
3345 return GEN_INT (val);
3346 }
3347 #endif
3348 /* This was formerly used only for non-IEEE float.
3349 eggert@twinsun.com says it is safe for IEEE also. */
3350 else
3351 {
3352 /* There are some simplifications we can do even if the operands
3353 aren't constant. */
3354 switch (code)
3355 {
3356 case NEG:
3357 case NOT:
3358 /* (not (not X)) == X, similarly for NEG. */
3359 if (GET_CODE (op) == code)
3360 return XEXP (op, 0);
3361 break;
3362
3363 case SIGN_EXTEND:
3364 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3365 becomes just the MINUS if its mode is MODE. This allows
3366 folding switch statements on machines using casesi (such as
3367 the Vax). */
3368 if (GET_CODE (op) == TRUNCATE
3369 && GET_MODE (XEXP (op, 0)) == mode
3370 && GET_CODE (XEXP (op, 0)) == MINUS
3371 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3372 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3373 return XEXP (op, 0);
3374
3375 #ifdef POINTERS_EXTEND_UNSIGNED
3376 if (! POINTERS_EXTEND_UNSIGNED
3377 && mode == Pmode && GET_MODE (op) == ptr_mode
3378 && CONSTANT_P (op))
3379 return convert_memory_address (Pmode, op);
3380 #endif
3381 break;
3382
3383 #ifdef POINTERS_EXTEND_UNSIGNED
3384 case ZERO_EXTEND:
3385 if (POINTERS_EXTEND_UNSIGNED
3386 && mode == Pmode && GET_MODE (op) == ptr_mode
3387 && CONSTANT_P (op))
3388 return convert_memory_address (Pmode, op);
3389 break;
3390 #endif
3391 }
3392
3393 return 0;
3394 }
3395 }
3396 \f
3397 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3398 and OP1. Return 0 if no simplification is possible.
3399
3400 Don't use this for relational operations such as EQ or LT.
3401 Use simplify_relational_operation instead. */
3402
3403 rtx
3404 simplify_binary_operation (code, mode, op0, op1)
3405 enum rtx_code code;
3406 enum machine_mode mode;
3407 rtx op0, op1;
3408 {
3409 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3410 HOST_WIDE_INT val;
3411 int width = GET_MODE_BITSIZE (mode);
3412 rtx tem;
3413
3414 /* Relational operations don't work here. We must know the mode
3415 of the operands in order to do the comparison correctly.
3416 Assuming a full word can give incorrect results.
3417 Consider comparing 128 with -128 in QImode. */
3418
3419 if (GET_RTX_CLASS (code) == '<')
3420 abort ();
3421
3422 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3423 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3424 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3425 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3426 {
3427 REAL_VALUE_TYPE f0, f1, value;
3428 jmp_buf handler;
3429
3430 if (setjmp (handler))
3431 return 0;
3432
3433 set_float_handler (handler);
3434
3435 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3436 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3437 f0 = real_value_truncate (mode, f0);
3438 f1 = real_value_truncate (mode, f1);
3439
3440 #ifdef REAL_ARITHMETIC
3441 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3442 #else
3443 switch (code)
3444 {
3445 case PLUS:
3446 value = f0 + f1;
3447 break;
3448 case MINUS:
3449 value = f0 - f1;
3450 break;
3451 case MULT:
3452 value = f0 * f1;
3453 break;
3454 case DIV:
3455 #ifndef REAL_INFINITY
3456 if (f1 == 0)
3457 return 0;
3458 #endif
3459 value = f0 / f1;
3460 break;
3461 case SMIN:
3462 value = MIN (f0, f1);
3463 break;
3464 case SMAX:
3465 value = MAX (f0, f1);
3466 break;
3467 default:
3468 abort ();
3469 }
3470 #endif
3471
3472 value = real_value_truncate (mode, value);
3473 set_float_handler (NULL_PTR);
3474 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3475 }
3476 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3477
3478 /* We can fold some multi-word operations. */
3479 if (GET_MODE_CLASS (mode) == MODE_INT
3480 && width == HOST_BITS_PER_WIDE_INT * 2
3481 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3482 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3483 {
3484 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3485
3486 if (GET_CODE (op0) == CONST_DOUBLE)
3487 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3488 else
3489 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3490
3491 if (GET_CODE (op1) == CONST_DOUBLE)
3492 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3493 else
3494 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3495
3496 switch (code)
3497 {
3498 case MINUS:
3499 /* A - B == A + (-B). */
3500 neg_double (l2, h2, &lv, &hv);
3501 l2 = lv, h2 = hv;
3502
3503 /* .. fall through ... */
3504
3505 case PLUS:
3506 add_double (l1, h1, l2, h2, &lv, &hv);
3507 break;
3508
3509 case MULT:
3510 mul_double (l1, h1, l2, h2, &lv, &hv);
3511 break;
3512
3513 case DIV: case MOD: case UDIV: case UMOD:
3514 /* We'd need to include tree.h to do this and it doesn't seem worth
3515 it. */
3516 return 0;
3517
3518 case AND:
3519 lv = l1 & l2, hv = h1 & h2;
3520 break;
3521
3522 case IOR:
3523 lv = l1 | l2, hv = h1 | h2;
3524 break;
3525
3526 case XOR:
3527 lv = l1 ^ l2, hv = h1 ^ h2;
3528 break;
3529
3530 case SMIN:
3531 if (h1 < h2
3532 || (h1 == h2
3533 && ((unsigned HOST_WIDE_INT) l1
3534 < (unsigned HOST_WIDE_INT) l2)))
3535 lv = l1, hv = h1;
3536 else
3537 lv = l2, hv = h2;
3538 break;
3539
3540 case SMAX:
3541 if (h1 > h2
3542 || (h1 == h2
3543 && ((unsigned HOST_WIDE_INT) l1
3544 > (unsigned HOST_WIDE_INT) l2)))
3545 lv = l1, hv = h1;
3546 else
3547 lv = l2, hv = h2;
3548 break;
3549
3550 case UMIN:
3551 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3552 || (h1 == h2
3553 && ((unsigned HOST_WIDE_INT) l1
3554 < (unsigned HOST_WIDE_INT) l2)))
3555 lv = l1, hv = h1;
3556 else
3557 lv = l2, hv = h2;
3558 break;
3559
3560 case UMAX:
3561 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3562 || (h1 == h2
3563 && ((unsigned HOST_WIDE_INT) l1
3564 > (unsigned HOST_WIDE_INT) l2)))
3565 lv = l1, hv = h1;
3566 else
3567 lv = l2, hv = h2;
3568 break;
3569
3570 case LSHIFTRT: case ASHIFTRT:
3571 case ASHIFT:
3572 case ROTATE: case ROTATERT:
3573 #ifdef SHIFT_COUNT_TRUNCATED
3574 if (SHIFT_COUNT_TRUNCATED)
3575 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3576 #endif
3577
3578 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3579 return 0;
3580
3581 if (code == LSHIFTRT || code == ASHIFTRT)
3582 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3583 code == ASHIFTRT);
3584 else if (code == ASHIFT)
3585 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3586 else if (code == ROTATE)
3587 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3588 else /* code == ROTATERT */
3589 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3590 break;
3591
3592 default:
3593 return 0;
3594 }
3595
3596 return immed_double_const (lv, hv, mode);
3597 }
3598
3599 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3600 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3601 {
3602 /* Even if we can't compute a constant result,
3603 there are some cases worth simplifying. */
3604
3605 switch (code)
3606 {
3607 case PLUS:
3608 /* In IEEE floating point, x+0 is not the same as x. Similarly
3609 for the other optimizations below. */
3610 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3611 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3612 break;
3613
3614 if (op1 == CONST0_RTX (mode))
3615 return op0;
3616
3617 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3618 if (GET_CODE (op0) == NEG)
3619 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3620 else if (GET_CODE (op1) == NEG)
3621 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3622
3623 /* Handle both-operands-constant cases. We can only add
3624 CONST_INTs to constants since the sum of relocatable symbols
3625 can't be handled by most assemblers. Don't add CONST_INT
3626 to CONST_INT since overflow won't be computed properly if the
3627 mode is wider than HOST_BITS_PER_WIDE_INT. */
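/* E.g. (plus (symbol_ref X) (const_int 4)) folds via plus_constant
   into (const (plus (symbol_ref X) (const_int 4))), whereas
   (plus (symbol_ref X) (symbol_ref Y)) is left alone. */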
3628
3629 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3630 && GET_CODE (op1) == CONST_INT)
3631 return plus_constant (op0, INTVAL (op1));
3632 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3633 && GET_CODE (op0) == CONST_INT)
3634 return plus_constant (op1, INTVAL (op0));
3635
3636 /* See if this is something like X * C + X or vice versa or
3637 if the multiplication is written as a shift. If so, we can
3638 distribute and make a new multiply, shift, or maybe just
3639 have X (e.g. (-X) + X * 2 is just X). But don't make a
3640 real multiply if we didn't have one before. */
3641
3642 if (! FLOAT_MODE_P (mode))
3643 {
3644 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3645 rtx lhs = op0, rhs = op1;
3646 int had_mult = 0;
3647
3648 if (GET_CODE (lhs) == NEG)
3649 coeff0 = -1, lhs = XEXP (lhs, 0);
3650 else if (GET_CODE (lhs) == MULT
3651 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3652 {
3653 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3654 had_mult = 1;
3655 }
3656 else if (GET_CODE (lhs) == ASHIFT
3657 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3658 && INTVAL (XEXP (lhs, 1)) >= 0
3659 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3660 {
3661 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3662 lhs = XEXP (lhs, 0);
3663 }
3664
3665 if (GET_CODE (rhs) == NEG)
3666 coeff1 = -1, rhs = XEXP (rhs, 0);
3667 else if (GET_CODE (rhs) == MULT
3668 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3669 {
3670 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3671 had_mult = 1;
3672 }
3673 else if (GET_CODE (rhs) == ASHIFT
3674 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3675 && INTVAL (XEXP (rhs, 1)) >= 0
3676 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3677 {
3678 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3679 rhs = XEXP (rhs, 0);
3680 }
3681
3682 if (rtx_equal_p (lhs, rhs))
3683 {
3684 tem = cse_gen_binary (MULT, mode, lhs,
3685 GEN_INT (coeff0 + coeff1));
3686 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3687 }
3688 }
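/* Worked examples of the distribution above: (plus (mult x 3) x)
   gives coeff0 = 3, coeff1 = 1 and folds to x * 4 (which
   cse_gen_binary may render as (ashift x 2)); (plus (neg x) x)
   gives coeff0 = -1, coeff1 = 1 and folds to zero when x has no
   side effects. With (plus (ashift x 1) x) the sum x * 3 would
   survive as a MULT, so it is rejected: had_mult is 0 and no real
   multiply was present in the source. */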
3689
3690 /* If one of the operands is a PLUS or a MINUS, see if we can
3691 simplify this by the associative law.
3692 Don't use the associative law for floating point.
3693 The inaccuracy makes it nonassociative,
3694 and subtle programs can break if operations are associated. */
3695
3696 if (INTEGRAL_MODE_P (mode)
3697 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3698 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3699 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3700 return tem;
3701 break;
3702
3703 case COMPARE:
3704 #ifdef HAVE_cc0
3705 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3706 using cc0, in which case we want to leave it as a COMPARE
3707 so we can distinguish it from a register-register-copy.
3708
3709 In IEEE floating point, x-0 is not the same as x. */
3710
3711 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3712 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3713 && op1 == CONST0_RTX (mode))
3714 return op0;
3715 #else
3716 /* Do nothing here. */
3717 #endif
3718 break;
3719
3720 case MINUS:
3721 /* None of these optimizations can be done for IEEE
3722 floating point. */
3723 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3724 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3725 break;
3726
3727 /* We can't assume x-x is 0 even with non-IEEE floating point,
3728 but since it is zero except in very strange circumstances, we
3729 will treat it as zero with -ffast-math. */
3730 if (rtx_equal_p (op0, op1)
3731 && ! side_effects_p (op0)
3732 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3733 return CONST0_RTX (mode);
3734
3735 /* Change subtraction from zero into negation. */
3736 if (op0 == CONST0_RTX (mode))
3737 return gen_rtx (NEG, mode, op1);
3738
3739 /* (-1 - a) is ~a. */
3740 if (op0 == constm1_rtx)
3741 return gen_rtx (NOT, mode, op1);
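/* This holds because in two's complement ~a == -a - 1,
   so -1 - a == -(a + 1) == ~a. */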
3742
3743 /* Subtracting 0 has no effect. */
3744 if (op1 == CONST0_RTX (mode))
3745 return op0;
3746
3747 /* See if this is something like X * C - X or vice versa or
3748 if the multiplication is written as a shift. If so, we can
3749 distribute and make a new multiply, shift, or maybe just
3750 have X (if C is 2 in the example above). But don't make a
3751 real multiply if we didn't have one before. */
3752
3753 if (! FLOAT_MODE_P (mode))
3754 {
3755 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3756 rtx lhs = op0, rhs = op1;
3757 int had_mult = 0;
3758
3759 if (GET_CODE (lhs) == NEG)
3760 coeff0 = -1, lhs = XEXP (lhs, 0);
3761 else if (GET_CODE (lhs) == MULT
3762 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3763 {
3764 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3765 had_mult = 1;
3766 }
3767 else if (GET_CODE (lhs) == ASHIFT
3768 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3769 && INTVAL (XEXP (lhs, 1)) >= 0
3770 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3771 {
3772 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3773 lhs = XEXP (lhs, 0);
3774 }
3775
3776 if (GET_CODE (rhs) == NEG)
3777 coeff1 = -1, rhs = XEXP (rhs, 0);
3778 else if (GET_CODE (rhs) == MULT
3779 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3780 {
3781 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3782 had_mult = 1;
3783 }
3784 else if (GET_CODE (rhs) == ASHIFT
3785 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3786 && INTVAL (XEXP (rhs, 1)) >= 0
3787 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3788 {
3789 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3790 rhs = XEXP (rhs, 0);
3791 }
3792
3793 if (rtx_equal_p (lhs, rhs))
3794 {
3795 tem = cse_gen_binary (MULT, mode, lhs,
3796 GEN_INT (coeff0 - coeff1));
3797 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3798 }
3799 }
3800
3801 /* (a - (-b)) -> (a + b). */
3802 if (GET_CODE (op1) == NEG)
3803 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3804
3805 /* If one of the operands is a PLUS or a MINUS, see if we can
3806 simplify this by the associative law.
3807 Don't use the associative law for floating point.
3808 The inaccuracy makes it nonassociative,
3809 and subtle programs can break if operations are associated. */
3810
3811 if (INTEGRAL_MODE_P (mode)
3812 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3813 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3814 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3815 return tem;
3816
3817 /* Don't let a relocatable value get a negative coeff. */
3818 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3819 return plus_constant (op0, - INTVAL (op1));
3820
3821 /* (x - (x & y)) -> (x & ~y) */
3822 if (GET_CODE (op1) == AND)
3823 {
3824 if (rtx_equal_p (op0, XEXP (op1, 0)))
3825 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3826 if (rtx_equal_p (op0, XEXP (op1, 1)))
3827 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3828 }
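/* E.g. with x = 0b1101 and y = 0b0110: x & y = 0b0100, and
   x - (x & y) = 0b1001 = x & ~y. The subtraction can never borrow,
   since x & y only contains bits that are already set in x. */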
3829 break;
3830
3831 case MULT:
3832 if (op1 == constm1_rtx)
3833 {
3834 tem = simplify_unary_operation (NEG, mode, op0, mode);
3835
3836 return tem ? tem : gen_rtx (NEG, mode, op0);
3837 }
3838
3839 /* In IEEE floating point, x*0 is not always 0. */
3840 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3841 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3842 && op1 == CONST0_RTX (mode)
3843 && ! side_effects_p (op0))
3844 return op1;
3845
3846 /* In IEEE floating point, x*1 is not equivalent to x for nans.
3847 However, ANSI says we can drop signals,
3848 so we can do this anyway. */
3849 if (op1 == CONST1_RTX (mode))
3850 return op0;
3851
3852 /* Convert a multiply by a constant power of two into a shift unless
3853 we are still generating RTL. This test is a kludge. */
3854 if (GET_CODE (op1) == CONST_INT
3855 && (val = exact_log2 (INTVAL (op1))) >= 0
3856 && ! rtx_equal_function_value_matters)
3857 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
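/* E.g. x * 8 becomes (ashift x (const_int 3)), since exact_log2 (8)
   is 3; exact_log2 returns -1 for non-powers of two such as 6, so
   those stay as multiplies. */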
3858
3859 if (GET_CODE (op1) == CONST_DOUBLE
3860 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3861 {
3862 REAL_VALUE_TYPE d;
3863 jmp_buf handler;
3864 int op1is2, op1ism1;
3865
3866 if (setjmp (handler))
3867 return 0;
3868
3869 set_float_handler (handler);
3870 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3871 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3872 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3873 set_float_handler (NULL_PTR);
3874
3875 /* x*2 is x+x and x*(-1) is -x */
3876 if (op1is2 && GET_MODE (op0) == mode)
3877 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3878
3879 else if (op1ism1 && GET_MODE (op0) == mode)
3880 return gen_rtx (NEG, mode, op0);
3881 }
3882 break;
3883
3884 case IOR:
3885 if (op1 == const0_rtx)
3886 return op0;
3887 if (GET_CODE (op1) == CONST_INT
3888 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3889 return op1;
3890 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3891 return op0;
3892 /* A | (~A) -> -1 */
3893 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3894 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3895 && ! side_effects_p (op0)
3896 && GET_MODE_CLASS (mode) != MODE_CC)
3897 return constm1_rtx;
3898 break;
3899
3900 case XOR:
3901 if (op1 == const0_rtx)
3902 return op0;
3903 if (GET_CODE (op1) == CONST_INT
3904 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3905 return gen_rtx (NOT, mode, op0);
3906 if (op0 == op1 && ! side_effects_p (op0)
3907 && GET_MODE_CLASS (mode) != MODE_CC)
3908 return const0_rtx;
3909 break;
3910
3911 case AND:
3912 if (op1 == const0_rtx && ! side_effects_p (op0))
3913 return const0_rtx;
3914 if (GET_CODE (op1) == CONST_INT
3915 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3916 return op0;
3917 if (op0 == op1 && ! side_effects_p (op0)
3918 && GET_MODE_CLASS (mode) != MODE_CC)
3919 return op0;
3920 /* A & (~A) -> 0 */
3921 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3922 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3923 && ! side_effects_p (op0)
3924 && GET_MODE_CLASS (mode) != MODE_CC)
3925 return const0_rtx;
3926 break;
3927
3928 case UDIV:
3929 /* Convert divide by power of two into shift (divide by 1 handled
3930 below). */
3931 if (GET_CODE (op1) == CONST_INT
3932 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3933 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3934
3935 /* ... fall through ... */
3936
3937 case DIV:
3938 if (op1 == CONST1_RTX (mode))
3939 return op0;
3940
3941 /* In IEEE floating point, 0/x is not always 0. */
3942 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3943 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3944 && op0 == CONST0_RTX (mode)
3945 && ! side_effects_p (op1))
3946 return op0;
3947
3948 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3949 /* Change division by a constant into multiplication. Only do
3950 this with -ffast-math until an expert says it is safe in
3951 general. */
3952 else if (GET_CODE (op1) == CONST_DOUBLE
3953 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
3954 && op1 != CONST0_RTX (mode)
3955 && flag_fast_math)
3956 {
3957 REAL_VALUE_TYPE d;
3958 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3959
3960 if (! REAL_VALUES_EQUAL (d, dconst0))
3961 {
3962 #if defined (REAL_ARITHMETIC)
3963 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
3964 return gen_rtx (MULT, mode, op0,
3965 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
3966 #else
3967 return gen_rtx (MULT, mode, op0,
3968 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
3969 #endif
3970 }
3971 }
3972 #endif
3973 break;
3974
3975 case UMOD:
3976 /* Handle modulus by power of two (mod with 1 handled below). */
3977 if (GET_CODE (op1) == CONST_INT
3978 && exact_log2 (INTVAL (op1)) > 0)
3979 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
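/* E.g. (umod x 8) becomes (and x (const_int 7)). This is valid only
   for unsigned modulus, which is why it is done under UMOD and not
   under MOD. */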
3980
3981 /* ... fall through ... */
3982
3983 case MOD:
3984 if ((op0 == const0_rtx || op1 == const1_rtx)
3985 && ! side_effects_p (op0) && ! side_effects_p (op1))
3986 return const0_rtx;
3987 break;
3988
3989 case ROTATERT:
3990 case ROTATE:
3991 /* Rotating ~0 always results in ~0. */
3992 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
3993 && INTVAL (op0) == GET_MODE_MASK (mode)
3994 && ! side_effects_p (op1))
3995 return op0;
3996
3997 /* ... fall through ... */
3998
3999 case ASHIFT:
4000 case ASHIFTRT:
4001 case LSHIFTRT:
4002 if (op1 == const0_rtx)
4003 return op0;
4004 if (op0 == const0_rtx && ! side_effects_p (op1))
4005 return op0;
4006 break;
4007
4008 case SMIN:
4009 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4010 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4011 && ! side_effects_p (op0))
4012 return op1;
4013 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4014 return op0;
4015 break;
4016
4017 case SMAX:
4018 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4019 && (INTVAL (op1)
4020 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4021 && ! side_effects_p (op0))
4022 return op1;
4023 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4024 return op0;
4025 break;
4026
4027 case UMIN:
4028 if (op1 == const0_rtx && ! side_effects_p (op0))
4029 return op1;
4030 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4031 return op0;
4032 break;
4033
4034 case UMAX:
4035 if (op1 == constm1_rtx && ! side_effects_p (op0))
4036 return op1;
4037 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4038 return op0;
4039 break;
4040
4041 default:
4042 abort ();
4043 }
4044
4045 return 0;
4046 }
4047
4048 /* Get the integer argument values in two forms:
4049 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4050
4051 arg0 = INTVAL (op0);
4052 arg1 = INTVAL (op1);
4053
4054 if (width < HOST_BITS_PER_WIDE_INT)
4055 {
4056 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4057 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4058
4059 arg0s = arg0;
4060 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4061 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4062
4063 arg1s = arg1;
4064 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4065 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4066 }
4067 else
4068 {
4069 arg0s = arg0;
4070 arg1s = arg1;
4071 }
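/* A worked instance, assuming width == 8: (const_int -1) masks down
   to arg0 == 0xff, and since bit 7 is set, arg0s is sign extended
   back to -1; (const_int 0x80) gives arg0 == 0x80 and arg0s == -128. */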
4072
4073 /* Compute the value of the arithmetic. */
4074
4075 switch (code)
4076 {
4077 case PLUS:
4078 val = arg0s + arg1s;
4079 break;
4080
4081 case MINUS:
4082 val = arg0s - arg1s;
4083 break;
4084
4085 case MULT:
4086 val = arg0s * arg1s;
4087 break;
4088
4089 case DIV:
4090 if (arg1s == 0)
4091 return 0;
4092 val = arg0s / arg1s;
4093 break;
4094
4095 case MOD:
4096 if (arg1s == 0)
4097 return 0;
4098 val = arg0s % arg1s;
4099 break;
4100
4101 case UDIV:
4102 if (arg1 == 0)
4103 return 0;
4104 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4105 break;
4106
4107 case UMOD:
4108 if (arg1 == 0)
4109 return 0;
4110 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4111 break;
4112
4113 case AND:
4114 val = arg0 & arg1;
4115 break;
4116
4117 case IOR:
4118 val = arg0 | arg1;
4119 break;
4120
4121 case XOR:
4122 val = arg0 ^ arg1;
4123 break;
4124
4125 case LSHIFTRT:
4126 /* If the shift count is undefined, don't fold it; let the machine do
4127 what it wants. But truncate it if the machine will do that. */
4128 if (arg1 < 0)
4129 return 0;
4130
4131 #ifdef SHIFT_COUNT_TRUNCATED
4132 if (SHIFT_COUNT_TRUNCATED)
4133 arg1 %= width;
4134 #endif
4135
4136 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4137 break;
4138
4139 case ASHIFT:
4140 if (arg1 < 0)
4141 return 0;
4142
4143 #ifdef SHIFT_COUNT_TRUNCATED
4144 if (SHIFT_COUNT_TRUNCATED)
4145 arg1 %= width;
4146 #endif
4147
4148 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4149 break;
4150
4151 case ASHIFTRT:
4152 if (arg1 < 0)
4153 return 0;
4154
4155 #ifdef SHIFT_COUNT_TRUNCATED
4156 if (SHIFT_COUNT_TRUNCATED)
4157 arg1 %= width;
4158 #endif
4159
4160 val = arg0s >> arg1;
4161
4162 /* The bootstrap compiler may not have sign extended the right shift.
4163 Manually extend the sign to ensure bootstrap cc matches gcc. */
4164 if (arg0s < 0 && arg1 > 0)
4165 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4166
4167 break;
4168
4169 case ROTATERT:
4170 if (arg1 < 0)
4171 return 0;
4172
4173 arg1 %= width;
4174 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4175 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4176 break;
4177
4178 case ROTATE:
4179 if (arg1 < 0)
4180 return 0;
4181
4182 arg1 %= width;
4183 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4184 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4185 break;
4186
4187 case COMPARE:
4188 /* Do nothing here. */
4189 return 0;
4190
4191 case SMIN:
4192 val = arg0s <= arg1s ? arg0s : arg1s;
4193 break;
4194
4195 case UMIN:
4196 val = ((unsigned HOST_WIDE_INT) arg0
4197 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4198 break;
4199
4200 case SMAX:
4201 val = arg0s > arg1s ? arg0s : arg1s;
4202 break;
4203
4204 case UMAX:
4205 val = ((unsigned HOST_WIDE_INT) arg0
4206 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4207 break;
4208
4209 default:
4210 abort ();
4211 }
4212
4213 /* Clear the bits that don't belong in our mode, unless they and our sign
4214 bit are all one. So we get either a reasonable negative value or a
4215 reasonable unsigned value for this mode. */
4216 if (width < HOST_BITS_PER_WIDE_INT
4217 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4218 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4219 val &= ((HOST_WIDE_INT) 1 << width) - 1;
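/* For example, with width == 8: val == 0x180 has bits above the mode
   that disagree with its sign bit, so it is masked down to 0x80;
   val == -128 already has all the high host bits equal to the sign
   bit and is left alone as a clean negative value. */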
4220
4221 /* If this would be an entire word for the target, but is not for
4222 the host, then sign-extend on the host so that the number will look
4223 the same way on the host that it would on the target.
4224
4225 For example, when building a compiler hosted on a 64 bit alpha
4226 and targeting a 32 bit sparc, we want the 32 bit unsigned value -1
4227 to be represented as the 64 bit value -1, and not as 0x00000000ffffffff.
4228 The latter confuses the sparc backend. */
4229
4230 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
4231 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
4232 val |= ((HOST_WIDE_INT) (-1) << width);
4233
4234 return GEN_INT (val);
4235 }
4236 \f
4237 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4238 PLUS or MINUS.
4239
4240 Rather than testing for specific cases, we use a brute-force method:
4241 apply all possible simplifications until no more changes occur, then
4242 rebuild the operation. */
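/* For example, given (MINUS (PLUS a b) (MINUS a c)) we expand to the
   operand list a, b, -a, c (the inner MINUS flips the negation of its
   second operand), the pairwise pass cancels a against -a, and the
   result is rebuilt as a single PLUS of b and c. */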
4243
4244 static rtx
4245 simplify_plus_minus (code, mode, op0, op1)
4246 enum rtx_code code;
4247 enum machine_mode mode;
4248 rtx op0, op1;
4249 {
4250 rtx ops[8];
4251 int negs[8];
4252 rtx result, tem;
4253 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4254 int first = 1, negate = 0, changed;
4255 int i, j;
4256
4257 bzero ((char *) ops, sizeof ops);
4258
4259 /* Set up the two operands and then expand them until nothing has been
4260 changed. If we run out of room in our array, give up; this should
4261 almost never happen. */
4262
4263 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4264
4265 changed = 1;
4266 while (changed)
4267 {
4268 changed = 0;
4269
4270 for (i = 0; i < n_ops; i++)
4271 switch (GET_CODE (ops[i]))
4272 {
4273 case PLUS:
4274 case MINUS:
4275 if (n_ops == 7)
4276 return 0;
4277
4278 ops[n_ops] = XEXP (ops[i], 1);
4279 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4280 ops[i] = XEXP (ops[i], 0);
4281 input_ops++;
4282 changed = 1;
4283 break;
4284
4285 case NEG:
4286 ops[i] = XEXP (ops[i], 0);
4287 negs[i] = ! negs[i];
4288 changed = 1;
4289 break;
4290
4291 case CONST:
4292 ops[i] = XEXP (ops[i], 0);
4293 input_consts++;
4294 changed = 1;
4295 break;
4296
4297 case NOT:
4298 /* ~a -> (-a - 1) */
4299 if (n_ops != 7)
4300 {
4301 ops[n_ops] = constm1_rtx;
4302 negs[n_ops++] = negs[i];
4303 ops[i] = XEXP (ops[i], 0);
4304 negs[i] = ! negs[i];
4305 changed = 1;
4306 }
4307 break;
4308
4309 case CONST_INT:
4310 if (negs[i])
4311 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4312 break;
4313 }
4314 }
4315
4316 /* If we only have two operands, we can't do anything. */
4317 if (n_ops <= 2)
4318 return 0;
4319
4320 /* Now simplify each pair of operands until nothing changes. The first
4321 time through just simplify constants against each other. */
4322
4323 changed = 1;
4324 while (changed)
4325 {
4326 changed = first;
4327
4328 for (i = 0; i < n_ops - 1; i++)
4329 for (j = i + 1; j < n_ops; j++)
4330 if (ops[i] != 0 && ops[j] != 0
4331 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4332 {
4333 rtx lhs = ops[i], rhs = ops[j];
4334 enum rtx_code ncode = PLUS;
4335
4336 if (negs[i] && ! negs[j])
4337 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4338 else if (! negs[i] && negs[j])
4339 ncode = MINUS;
4340
4341 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4342 if (tem)
4343 {
4344 ops[i] = tem, ops[j] = 0;
4345 negs[i] = negs[i] && negs[j];
4346 if (GET_CODE (tem) == NEG)
4347 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4348
4349 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4350 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4351 changed = 1;
4352 }
4353 }
4354
4355 first = 0;
4356 }
4357
4358 /* Pack all the operands to the lower-numbered entries and give up if
4359 we didn't reduce the number of operands we had. Make sure we
4360 count a CONST as two operands. If we have the same number of
4361 operands, but have made more CONSTs than we had, this is also
4362 an improvement, so accept it. */
4363
4364 for (i = 0, j = 0; j < n_ops; j++)
4365 if (ops[j] != 0)
4366 {
4367 ops[i] = ops[j], negs[i++] = negs[j];
4368 if (GET_CODE (ops[j]) == CONST)
4369 n_consts++;
4370 }
4371
4372 if (i + n_consts > input_ops
4373 || (i + n_consts == input_ops && n_consts <= input_consts))
4374 return 0;
4375
4376 n_ops = i;
4377
4378 /* If we have a CONST_INT, put it last. */
4379 for (i = 0; i < n_ops - 1; i++)
4380 if (GET_CODE (ops[i]) == CONST_INT)
4381 {
4382 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4383 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4384 }
4385
4386 /* Put a non-negated operand first. If there aren't any, make all
4387 operands positive and negate the whole thing later. */
4388 for (i = 0; i < n_ops && negs[i]; i++)
4389 ;
4390
4391 if (i == n_ops)
4392 {
4393 for (i = 0; i < n_ops; i++)
4394 negs[i] = 0;
4395 negate = 1;
4396 }
4397 else if (i != 0)
4398 {
4399 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4400 j = negs[0], negs[0] = negs[i], negs[i] = j;
4401 }
4402
4403 /* Now make the result by performing the requested operations. */
4404 result = ops[0];
4405 for (i = 1; i < n_ops; i++)
4406 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4407
4408 return negate ? gen_rtx (NEG, mode, result) : result;
4409 }
4410 \f
4411 /* Make a binary operation by properly ordering the operands and
4412 seeing if the expression folds. */
4413
4414 static rtx
4415 cse_gen_binary (code, mode, op0, op1)
4416 enum rtx_code code;
4417 enum machine_mode mode;
4418 rtx op0, op1;
4419 {
4420 rtx tem;
4421
4422 /* Put complex operands first and constants second if commutative. */
4423 if (GET_RTX_CLASS (code) == 'c'
4424 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4425 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4426 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4427 || (GET_CODE (op0) == SUBREG
4428 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4429 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4430 tem = op0, op0 = op1, op1 = tem;
4431
4432 /* If this simplifies, do it. */
4433 tem = simplify_binary_operation (code, mode, op0, op1);
4434
4435 if (tem)
4436 return tem;
4437
4438 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4439 just form the operation. */
4440
4441 if (code == PLUS && GET_CODE (op1) == CONST_INT
4442 && GET_MODE (op0) != VOIDmode)
4443 return plus_constant (op0, INTVAL (op1));
4444 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4445 && GET_MODE (op0) != VOIDmode)
4446 return plus_constant (op0, - INTVAL (op1));
4447 else
4448 return gen_rtx (code, mode, op0, op1);
4449 }
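/* For instance, cse_gen_binary (PLUS, SImode, (const_int 5), reg)
   first swaps the operands to put the register first, finds no full
   simplification, and falls through to plus_constant, yielding
   (plus reg (const_int 5)). */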
4450 \f
4451 /* Like simplify_binary_operation except used for relational operators.
4452 MODE is the mode of the operands, not that of the result. If MODE
4453 is VOIDmode, both operands must also be VOIDmode and we compare the
4454 operands in "infinite precision".
4455
4456 If no simplification is possible, this function returns zero. Otherwise,
4457 it returns either const_true_rtx or const0_rtx. */
4458
4459 rtx
4460 simplify_relational_operation (code, mode, op0, op1)
4461 enum rtx_code code;
4462 enum machine_mode mode;
4463 rtx op0, op1;
4464 {
4465 int equal, op0lt, op0ltu, op1lt, op1ltu;
4466 rtx tem;
4467
4468 /* If op0 is a compare, extract the comparison arguments from it. */
4469 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4470 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4471
4472 /* We can't simplify MODE_CC values since we don't know what the
4473 actual comparison is. */
4474 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4475 #ifdef HAVE_cc0
4476 || op0 == cc0_rtx
4477 #endif
4478 )
4479 return 0;
4480
4481 /* For integer comparisons of A and B maybe we can simplify A - B and can
4482 then simplify a comparison of that with zero. If A and B are both either
4483 a register or a CONST_INT, this can't help; testing for these cases will
4484 prevent infinite recursion here and speed things up.
4485
4486 If CODE is an unsigned comparison, then we can never do this optimization,
4487 because it gives an incorrect result if the subtraction wraps around zero.
4488 ANSI C defines unsigned operations such that they never overflow, and
4489 thus such cases cannot be ignored. */
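/* Concretely, with 32-bit operands: 0xffffffff LTU 1 is false, but
   0xffffffff - 1 == 0xfffffffe, which is negative as a signed value,
   so testing the difference with LT against zero would wrongly say
   true. */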
4490
4491 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4492 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4493 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4494 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4495 && code != GTU && code != GEU && code != LTU && code != LEU)
4496 return simplify_relational_operation (signed_condition (code),
4497 mode, tem, const0_rtx);
4498
4499 /* For non-IEEE floating-point, if the two operands are equal, we know the
4500 result. */
4501 if (rtx_equal_p (op0, op1)
4502 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4503 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4504 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4505
4506 /* If the operands are floating-point constants, see if we can fold
4507 the result. */
4508 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4509 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4510 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4511 {
4512 REAL_VALUE_TYPE d0, d1;
4513 jmp_buf handler;
4514
4515 if (setjmp (handler))
4516 return 0;
4517
4518 set_float_handler (handler);
4519 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4520 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4521 equal = REAL_VALUES_EQUAL (d0, d1);
4522 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4523 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4524 set_float_handler (NULL_PTR);
4525 }
4526 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4527
4528 /* Otherwise, see if the operands are both integers. */
4529 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4530 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4531 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4532 {
4533 int width = GET_MODE_BITSIZE (mode);
4534 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4535 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4536
4537 /* Get the two words comprising each integer constant. */
4538 if (GET_CODE (op0) == CONST_DOUBLE)
4539 {
4540 l0u = l0s = CONST_DOUBLE_LOW (op0);
4541 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4542 }
4543 else
4544 {
4545 l0u = l0s = INTVAL (op0);
4546 h0u = 0, h0s = l0s < 0 ? -1 : 0;
4547 }
4548
4549 if (GET_CODE (op1) == CONST_DOUBLE)
4550 {
4551 l1u = l1s = CONST_DOUBLE_LOW (op1);
4552 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4553 }
4554 else
4555 {
4556 l1u = l1s = INTVAL (op1);
4557 h1u = 0, h1s = l1s < 0 ? -1 : 0;
4558 }
4559
4560 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4561 we have to sign or zero-extend the values. */
4562 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4563 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4564
4565 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4566 {
4567 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4568 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4569
4570 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4571 l0s |= ((HOST_WIDE_INT) (-1) << width);
4572
4573 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4574 l1s |= ((HOST_WIDE_INT) (-1) << width);
4575 }
4576
4577 equal = (h0u == h1u && l0u == l1u);
4578 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4579 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4580 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4581 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4582 }
4583
4584 /* Otherwise, there are some code-specific tests we can make. */
4585 else
4586 {
4587 switch (code)
4588 {
4589 case EQ:
4590 /* References to the frame plus a constant or labels cannot
4591 be zero, but a SYMBOL_REF can due to #pragma weak. */
4592 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4593 || GET_CODE (op0) == LABEL_REF)
4594 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4595 /* On some machines, the ap reg can be 0 sometimes. */
4596 && op0 != arg_pointer_rtx
4597 #endif
4598 )
4599 return const0_rtx;
4600 break;
4601
4602 case NE:
4603 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4604 || GET_CODE (op0) == LABEL_REF)
4605 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4606 && op0 != arg_pointer_rtx
4607 #endif
4608 )
4609 return const_true_rtx;
4610 break;
4611
4612 case GEU:
4613 /* Unsigned values are never negative. */
4614 if (op1 == const0_rtx)
4615 return const_true_rtx;
4616 break;
4617
4618 case LTU:
4619 if (op1 == const0_rtx)
4620 return const0_rtx;
4621 break;
4622
4623 case LEU:
4624 /* Unsigned values are never greater than the largest
4625 unsigned value. */
4626 if (GET_CODE (op1) == CONST_INT
4627 && INTVAL (op1) == GET_MODE_MASK (mode)
4628 && INTEGRAL_MODE_P (mode))
4629 return const_true_rtx;
4630 break;
4631
4632 case GTU:
4633 if (GET_CODE (op1) == CONST_INT
4634 && INTVAL (op1) == GET_MODE_MASK (mode)
4635 && INTEGRAL_MODE_P (mode))
4636 return const0_rtx;
4637 break;
4638 }
4639
4640 return 0;
4641 }
4642
4643 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4644 as appropriate. */
4645 switch (code)
4646 {
4647 case EQ:
4648 return equal ? const_true_rtx : const0_rtx;
4649 case NE:
4650 return ! equal ? const_true_rtx : const0_rtx;
4651 case LT:
4652 return op0lt ? const_true_rtx : const0_rtx;
4653 case GT:
4654 return op1lt ? const_true_rtx : const0_rtx;
4655 case LTU:
4656 return op0ltu ? const_true_rtx : const0_rtx;
4657 case GTU:
4658 return op1ltu ? const_true_rtx : const0_rtx;
4659 case LE:
4660 return equal || op0lt ? const_true_rtx : const0_rtx;
4661 case GE:
4662 return equal || op1lt ? const_true_rtx : const0_rtx;
4663 case LEU:
4664 return equal || op0ltu ? const_true_rtx : const0_rtx;
4665 case GEU:
4666 return equal || op1ltu ? const_true_rtx : const0_rtx;
4667 }
4668
4669 abort ();
4670 }
4671 \f
4672 /* Simplify CODE, an operation with result mode MODE and three operands,
4673 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4674 a constant. Return 0 if no simplification is possible. */
4675
4676 rtx
4677 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4678 enum rtx_code code;
4679 enum machine_mode mode, op0_mode;
4680 rtx op0, op1, op2;
4681 {
4682 int width = GET_MODE_BITSIZE (mode);
4683
4684 /* VOIDmode means "infinite" precision. */
4685 if (width == 0)
4686 width = HOST_BITS_PER_WIDE_INT;
4687
4688 switch (code)
4689 {
4690 case SIGN_EXTRACT:
4691 case ZERO_EXTRACT:
4692 if (GET_CODE (op0) == CONST_INT
4693 && GET_CODE (op1) == CONST_INT
4694 && GET_CODE (op2) == CONST_INT
4695 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4696 && width <= HOST_BITS_PER_WIDE_INT)
4697 {
4698 /* Extracting a bit-field from a constant. */
4699 HOST_WIDE_INT val = INTVAL (op0);
4700
4701 if (BITS_BIG_ENDIAN)
4702 val >>= (GET_MODE_BITSIZE (op0_mode)
4703 - INTVAL (op2) - INTVAL (op1));
4704 else
4705 val >>= INTVAL (op2);
4706
4707 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4708 {
4709 /* First zero-extend. */
4710 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4711 /* If desired, propagate sign bit. */
4712 if (code == SIGN_EXTRACT
4713 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4714 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4715 }
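/* E.g. (zero_extract (const_int 0xabcd) (const_int 4) (const_int 8))
   with little-endian bit numbering shifts right by 8 and masks to 4
   bits, yielding 0xb; SIGN_EXTRACT of the same field gives -5, since
   bit 3 of 0xb is set. */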
4716
4717 /* Clear the bits that don't belong in our mode,
4718 unless they and our sign bit are all one.
4719 So we get either a reasonable negative value or a reasonable
4720 unsigned value for this mode. */
4721 if (width < HOST_BITS_PER_WIDE_INT
4722 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4723 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4724 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4725
4726 return GEN_INT (val);
4727 }
4728 break;
4729
4730 case IF_THEN_ELSE:
4731 if (GET_CODE (op0) == CONST_INT)
4732 return op0 != const0_rtx ? op1 : op2;
4733 break;
4734
4735 default:
4736 abort ();
4737 }
4738
4739 return 0;
4740 }
4741 \f
4742 /* If X is a nontrivial arithmetic operation on an argument
4743 for which a constant value can be determined, return
4744 the result of operating on that value, as a constant.
4745 Otherwise, return X, possibly with one or more operands
4746 modified by recursive calls to this function.
4747
4748 If X is a register whose contents are known, we do NOT
4749 return those contents here. equiv_constant is called to
4750 perform that task.
4751
4752 INSN is the insn that we may be modifying. If it is 0, make a copy
4753 of X before modifying it. */
4754
4755 static rtx
4756 fold_rtx (x, insn)
4757 rtx x;
4758 rtx insn;
4759 {
4760 register enum rtx_code code;
4761 register enum machine_mode mode;
4762 register char *fmt;
4763 register int i;
4764 rtx new = 0;
4765 int copied = 0;
4766 int must_swap = 0;
4767
4768 /* Folded equivalents of first two operands of X. */
4769 rtx folded_arg0;
4770 rtx folded_arg1;
4771
4772 /* Constant equivalents of first three operands of X;
4773 0 when no such equivalent is known. */
4774 rtx const_arg0;
4775 rtx const_arg1;
4776 rtx const_arg2;
4777
4778 /* The mode of the first operand of X. We need this for sign and zero
4779 extends. */
4780 enum machine_mode mode_arg0;
4781
4782 if (x == 0)
4783 return x;
4784
4785 mode = GET_MODE (x);
4786 code = GET_CODE (x);
4787 switch (code)
4788 {
4789 case CONST:
4790 case CONST_INT:
4791 case CONST_DOUBLE:
4792 case SYMBOL_REF:
4793 case LABEL_REF:
4794 case REG:
4795 /* No use simplifying an EXPR_LIST
4796 since they are used only for lists of args
4797 in a function call's REG_EQUAL note. */
4798 case EXPR_LIST:
4799 return x;
4800
4801 #ifdef HAVE_cc0
4802 case CC0:
4803 return prev_insn_cc0;
4804 #endif
4805
4806 case PC:
4807 /* If the next insn is a CODE_LABEL followed by a jump table,
4808 PC's value is a LABEL_REF pointing to that label. That
4809 lets us fold switch statements on the Vax. */
4810 if (insn && GET_CODE (insn) == JUMP_INSN)
4811 {
4812 rtx next = next_nonnote_insn (insn);
4813
4814 if (next && GET_CODE (next) == CODE_LABEL
4815 && NEXT_INSN (next) != 0
4816 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4817 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4818 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4819 return gen_rtx (LABEL_REF, Pmode, next);
4820 }
4821 break;
4822
4823 case SUBREG:
4824 /* See if we previously assigned a constant value to this SUBREG. */
4825 if ((new = lookup_as_function (x, CONST_INT)) != 0
4826 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4827 return new;
4828
4829 /* If this is a paradoxical SUBREG, we have no idea what value the
4830 extra bits would have. However, if the operand is equivalent
4831 to a SUBREG whose operand is the same as our mode, and all the
4832 modes are within a word, we can just use the inner operand
4833 because these SUBREGs just say how to treat the register.
4834
4835 Similarly if we find an integer constant. */
4836
4837 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4838 {
4839 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4840 struct table_elt *elt;
4841
4842 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4843 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4844 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4845 imode)) != 0)
4846 for (elt = elt->first_same_value;
4847 elt; elt = elt->next_same_value)
4848 {
4849 if (CONSTANT_P (elt->exp)
4850 && GET_MODE (elt->exp) == VOIDmode)
4851 return elt->exp;
4852
4853 if (GET_CODE (elt->exp) == SUBREG
4854 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4855 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4856 return copy_rtx (SUBREG_REG (elt->exp));
4857 }
4858
4859 return x;
4860 }
4861
4862 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4863 We might be able to if the SUBREG is extracting a single word in an
4864 integral mode or extracting the low part. */
4865
4866 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4867 const_arg0 = equiv_constant (folded_arg0);
4868 if (const_arg0)
4869 folded_arg0 = const_arg0;
4870
4871 if (folded_arg0 != SUBREG_REG (x))
4872 {
4873 new = 0;
4874
4875 if (GET_MODE_CLASS (mode) == MODE_INT
4876 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4877 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4878 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4879 GET_MODE (SUBREG_REG (x)));
4880 if (new == 0 && subreg_lowpart_p (x))
4881 new = gen_lowpart_if_possible (mode, folded_arg0);
4882 if (new)
4883 return new;
4884 }
4885
4886 /* If this is a narrowing SUBREG and our operand is a REG, see if
4887 we can find an equivalence for REG that is an arithmetic operation
4888 in a wider mode where both operands are paradoxical SUBREGs
4889 from objects of our result mode. In that case, we couldn't report
4890 an equivalent value for that operation, since we don't know what the
4891 extra bits will be. But we can find an equivalence for this SUBREG
4892 by folding that operation in the narrow mode. This allows us to
4893 fold arithmetic in narrow modes when the machine only supports
4894 word-sized arithmetic.
4895
4896 Also look for a case where we have a SUBREG whose operand is the
4897 same as our result. If both modes are smaller than a word, we
4898 are simply interpreting a register in different modes and we
4899 can use the inner value. */
4900
4901 if (GET_CODE (folded_arg0) == REG
4902 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4903 && subreg_lowpart_p (x))
4904 {
4905 struct table_elt *elt;
4906
4907 /* We can use HASH here since we know that canon_hash won't be
4908 called. */
4909 elt = lookup (folded_arg0,
4910 HASH (folded_arg0, GET_MODE (folded_arg0)),
4911 GET_MODE (folded_arg0));
4912
4913 if (elt)
4914 elt = elt->first_same_value;
4915
4916 for (; elt; elt = elt->next_same_value)
4917 {
4918 enum rtx_code eltcode = GET_CODE (elt->exp);
4919
4920 /* Just check for unary and binary operations. */
4921 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4922 && GET_CODE (elt->exp) != SIGN_EXTEND
4923 && GET_CODE (elt->exp) != ZERO_EXTEND
4924 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4925 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4926 {
4927 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4928
4929 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4930 op0 = fold_rtx (op0, NULL_RTX);
4931
4932 op0 = equiv_constant (op0);
4933 if (op0)
4934 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4935 op0, mode);
4936 }
4937 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4938 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4939 && eltcode != DIV && eltcode != MOD
4940 && eltcode != UDIV && eltcode != UMOD
4941 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4942 && eltcode != ROTATE && eltcode != ROTATERT
4943 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4944 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
4945 == mode))
4946 || CONSTANT_P (XEXP (elt->exp, 0)))
4947 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
4948 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
4949 == mode))
4950 || CONSTANT_P (XEXP (elt->exp, 1))))
4951 {
4952 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
4953 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
4954
4955 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4956 op0 = fold_rtx (op0, NULL_RTX);
4957
4958 if (op0)
4959 op0 = equiv_constant (op0);
4960
4961 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
4962 op1 = fold_rtx (op1, NULL_RTX);
4963
4964 if (op1)
4965 op1 = equiv_constant (op1);
4966
4967 /* If we are looking for the low SImode part of
4968 (ashift:DI c (const_int 32)), it doesn't work
4969 to compute that in SImode, because a 32-bit shift
4970 in SImode is unpredictable. We know the value is 0. */
4971 if (op0 && op1
4972 && GET_CODE (elt->exp) == ASHIFT
4973 && GET_CODE (op1) == CONST_INT
4974 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
4975 {
4976 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
4977 /* If the count fits in the inner mode's width,
4978 but exceeds the outer mode's width,
4979 the value will get truncated to 0
4980 by the subreg. */
4981 new = const0_rtx;
4982
4983 else
4984 /* If the count exceeds even the inner mode's width,
4985 don't fold this expression. */
4986 new = 0;
4987 }
4988 else if (op0 && op1)
4989 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
4990 op0, op1);
4991 }
4992
4993 else if (GET_CODE (elt->exp) == SUBREG
4994 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4995 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
4996 <= UNITS_PER_WORD)
4997 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4998 new = copy_rtx (SUBREG_REG (elt->exp));
4999
5000 if (new)
5001 return new;
5002 }
5003 }
5004
5005 return x;
5006
5007 case NOT:
5008 case NEG:
5009 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5010 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5011 new = lookup_as_function (XEXP (x, 0), code);
5012 if (new)
5013 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5014 break;
5015
5016 case MEM:
5017 /* If we are not actually processing an insn, don't try to find the
5018 best address. Not only don't we care, but we could modify the
5019 MEM in an invalid way since we have no insn to validate against. */
5020 if (insn != 0)
5021 find_best_addr (insn, &XEXP (x, 0));
5022
5023 {
5024 /* Even if we don't fold in the insn itself,
5025 we can safely do so here, in hopes of getting a constant. */
5026 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5027 rtx base = 0;
5028 HOST_WIDE_INT offset = 0;
5029
5030 if (GET_CODE (addr) == REG
5031 && REGNO_QTY_VALID_P (REGNO (addr))
5032 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
5033 && qty_const[reg_qty[REGNO (addr)]] != 0)
5034 addr = qty_const[reg_qty[REGNO (addr)]];
5035
5036 /* If address is constant, split it into a base and integer offset. */
5037 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5038 base = addr;
5039 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5040 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5041 {
5042 base = XEXP (XEXP (addr, 0), 0);
5043 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5044 }
5045 else if (GET_CODE (addr) == LO_SUM
5046 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5047 base = XEXP (addr, 1);
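/* So (const (plus (symbol_ref "x") (const_int 8))) splits into
   base == (symbol_ref "x") and offset == 8, which lets the
   constant-pool and jump-table cases below look at the symbol
   alone. */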
5048
5049 /* If this is a constant pool reference, we can fold it into its
5050 constant to allow better value tracking. */
5051 if (base && GET_CODE (base) == SYMBOL_REF
5052 && CONSTANT_POOL_ADDRESS_P (base))
5053 {
5054 rtx constant = get_pool_constant (base);
5055 enum machine_mode const_mode = get_pool_mode (base);
5056 rtx new;
5057
5058 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5059 constant_pool_entries_cost = COST (constant);
5060
5061 /* If we are loading the full constant, we have an equivalence. */
5062 if (offset == 0 && mode == const_mode)
5063 return constant;
5064
5065 /* If this actually isn't a constant (weird!), we can't do
5066 anything. Otherwise, handle the two most common cases:
5067 extracting a word from a multi-word constant, and extracting
5068 the low-order bits. Other cases don't seem common enough to
5069 worry about. */
5070 if (! CONSTANT_P (constant))
5071 return x;
5072
5073 if (GET_MODE_CLASS (mode) == MODE_INT
5074 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5075 && offset % UNITS_PER_WORD == 0
5076 && (new = operand_subword (constant,
5077 offset / UNITS_PER_WORD,
5078 0, const_mode)) != 0)
5079 return new;
5080
5081 if (((BYTES_BIG_ENDIAN
5082 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5083 || (! BYTES_BIG_ENDIAN && offset == 0))
5084 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5085 return new;
5086 }
5087
5088 /* If this is a reference to a label at a known position in a jump
5089 table, we also know its value. */
5090 if (base && GET_CODE (base) == LABEL_REF)
5091 {
5092 rtx label = XEXP (base, 0);
5093 rtx table_insn = NEXT_INSN (label);
5094
5095 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5096 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5097 {
5098 rtx table = PATTERN (table_insn);
5099
5100 if (offset >= 0
5101 && (offset / GET_MODE_SIZE (GET_MODE (table))
5102 < XVECLEN (table, 0)))
5103 return XVECEXP (table, 0,
5104 offset / GET_MODE_SIZE (GET_MODE (table)));
5105 }
5106 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5107 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5108 {
5109 rtx table = PATTERN (table_insn);
5110
5111 if (offset >= 0
5112 && (offset / GET_MODE_SIZE (GET_MODE (table))
5113 < XVECLEN (table, 1)))
5114 {
5115 offset /= GET_MODE_SIZE (GET_MODE (table));
5116 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5117 XEXP (table, 0));
5118
5119 if (GET_MODE (table) != Pmode)
5120 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5121
5122 /* Indicate this is a constant. This isn't a
5123 valid form of CONST, but it will only be used
5124 to fold the next insns and then discarded, so
5125 it should be safe. */
5126 return gen_rtx (CONST, GET_MODE (new), new);
5127 }
5128 }
5129 }
5130
5131 return x;
5132 }
5133 }
5134
5135 const_arg0 = 0;
5136 const_arg1 = 0;
5137 const_arg2 = 0;
5138 mode_arg0 = VOIDmode;
5139
5140 /* Try folding our operands.
5141 Then see which ones have constant values known. */
5142
5143 fmt = GET_RTX_FORMAT (code);
5144 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5145 if (fmt[i] == 'e')
5146 {
5147 rtx arg = XEXP (x, i);
5148 rtx folded_arg = arg, const_arg = 0;
5149 enum machine_mode mode_arg = GET_MODE (arg);
5150 rtx cheap_arg, expensive_arg;
5151 rtx replacements[2];
5152 int j;
5153
5154 /* Most arguments are cheap, so handle them specially. */
5155 switch (GET_CODE (arg))
5156 {
5157 case REG:
5158 /* This is the same as calling equiv_constant; it is duplicated
5159 here for speed. */
5160 if (REGNO_QTY_VALID_P (REGNO (arg))
5161 && qty_const[reg_qty[REGNO (arg)]] != 0
5162 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5163 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5164 const_arg
5165 = gen_lowpart_if_possible (GET_MODE (arg),
5166 qty_const[reg_qty[REGNO (arg)]]);
5167 break;
5168
5169 case CONST:
5170 case CONST_INT:
5171 case SYMBOL_REF:
5172 case LABEL_REF:
5173 case CONST_DOUBLE:
5174 const_arg = arg;
5175 break;
5176
5177 #ifdef HAVE_cc0
5178 case CC0:
5179 folded_arg = prev_insn_cc0;
5180 mode_arg = prev_insn_cc0_mode;
5181 const_arg = equiv_constant (folded_arg);
5182 break;
5183 #endif
5184
5185 default:
5186 folded_arg = fold_rtx (arg, insn);
5187 const_arg = equiv_constant (folded_arg);
5188 }
5189
5190 /* For the first three operands, see if the operand
5191 is constant or equivalent to a constant. */
5192 switch (i)
5193 {
5194 case 0:
5195 folded_arg0 = folded_arg;
5196 const_arg0 = const_arg;
5197 mode_arg0 = mode_arg;
5198 break;
5199 case 1:
5200 folded_arg1 = folded_arg;
5201 const_arg1 = const_arg;
5202 break;
5203 case 2:
5204 const_arg2 = const_arg;
5205 break;
5206 }
5207
5208 /* Pick the least expensive of the folded argument and an
5209 equivalent constant argument. */
5210 if (const_arg == 0 || const_arg == folded_arg
5211 || COST (const_arg) > COST (folded_arg))
5212 cheap_arg = folded_arg, expensive_arg = const_arg;
5213 else
5214 cheap_arg = const_arg, expensive_arg = folded_arg;
5215
5216 /* Try to replace the operand with the cheapest of the two
5217 possibilities. If it doesn't work and this is either of the first
5218 two operands of a commutative operation, try swapping them.
5219 If THAT fails, try the more expensive, provided it is cheaper
5220 than what is already there. */
5221
5222 if (cheap_arg == XEXP (x, i))
5223 continue;
5224
5225 if (insn == 0 && ! copied)
5226 {
5227 x = copy_rtx (x);
5228 copied = 1;
5229 }
5230
5231 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5232 for (j = 0;
5233 j < 2 && replacements[j]
5234 && COST (replacements[j]) < COST (XEXP (x, i));
5235 j++)
5236 {
5237 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5238 break;
5239
5240 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5241 {
5242 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5243 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5244
5245 if (apply_change_group ())
5246 {
5247 /* Swap them back to be invalid so that this loop can
5248 continue and flag them to be swapped back later. */
5249 rtx tem;
5250
5251 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5252 XEXP (x, 1) = tem;
5253 must_swap = 1;
5254 break;
5255 }
5256 }
5257 }
5258 }
5259
5260 else if (fmt[i] == 'E')
5261 /* Don't try to fold inside a vector of expressions.
5262 Doing nothing is harmless. */
5263 ;
5264
5265 /* If a commutative operation, place a constant integer as the second
5266 operand unless the first operand is also a constant integer. Otherwise,
5267 place any constant second unless the first operand is also a constant. */
5268
5269 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5270 {
5271 if (must_swap || (const_arg0
5272 && (const_arg1 == 0
5273 || (GET_CODE (const_arg0) == CONST_INT
5274 && GET_CODE (const_arg1) != CONST_INT))))
5275 {
5276 register rtx tem = XEXP (x, 0);
5277
5278 if (insn == 0 && ! copied)
5279 {
5280 x = copy_rtx (x);
5281 copied = 1;
5282 }
5283
5284 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5285 validate_change (insn, &XEXP (x, 1), tem, 1);
5286 if (apply_change_group ())
5287 {
5288 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5289 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5290 }
5291 }
5292 }
5293
5294 /* If X is an arithmetic operation, see if we can simplify it. */
5295
5296 switch (GET_RTX_CLASS (code))
5297 {
5298 case '1':
5299 {
5300 int is_const = 0;
5301
5302 /* We can't simplify extension ops unless we know the
5303 original mode. */
5304 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5305 && mode_arg0 == VOIDmode)
5306 break;
5307
5308 /* If we had a CONST, strip it off and put it back later if we
5309 fold. */
5310 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5311 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5312
5313 new = simplify_unary_operation (code, mode,
5314 const_arg0 ? const_arg0 : folded_arg0,
5315 mode_arg0);
5316 if (new != 0 && is_const)
5317 new = gen_rtx (CONST, mode, new);
5318 }
5319 break;
5320
5321 case '<':
5322 /* See what items are actually being compared and set FOLDED_ARG[01]
5323 to those values and CODE to the actual comparison code. If any are
5324 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5325 do anything if both operands are already known to be constant. */
5326
5327 if (const_arg0 == 0 || const_arg1 == 0)
5328 {
5329 struct table_elt *p0, *p1;
5330 rtx true = const_true_rtx, false = const0_rtx;
5331 enum machine_mode mode_arg1;
5332
5333 #ifdef FLOAT_STORE_FLAG_VALUE
5334 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5335 {
5336 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5337 mode);
5338 false = CONST0_RTX (mode);
5339 }
5340 #endif
5341
5342 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5343 &mode_arg0, &mode_arg1);
5344 const_arg0 = equiv_constant (folded_arg0);
5345 const_arg1 = equiv_constant (folded_arg1);
5346
5347 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5348 what kinds of things are being compared, so we can't do
5349 anything with this comparison. */
5350
5351 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5352 break;
5353
5354 /* If we do not now have two constants being compared, see if we
5355 can nevertheless deduce some things about the comparison. */
5356 if (const_arg0 == 0 || const_arg1 == 0)
5357 {
5358 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or non-explicit
5359 constant? These aren't zero, but we don't know their sign. */
5360 if (const_arg1 == const0_rtx
5361 && (NONZERO_BASE_PLUS_P (folded_arg0)
5362 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5363 come out as 0. */
5364 || GET_CODE (folded_arg0) == SYMBOL_REF
5365 #endif
5366 || GET_CODE (folded_arg0) == LABEL_REF
5367 || GET_CODE (folded_arg0) == CONST))
5368 {
5369 if (code == EQ)
5370 return false;
5371 else if (code == NE)
5372 return true;
5373 }
5374
5375 /* See if the two operands are the same. We don't do this
5376 for IEEE floating-point since we can't assume x == x,
5377 because x might be a NaN. */
5378
5379 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5380 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5381 && (folded_arg0 == folded_arg1
5382 || (GET_CODE (folded_arg0) == REG
5383 && GET_CODE (folded_arg1) == REG
5384 && (reg_qty[REGNO (folded_arg0)]
5385 == reg_qty[REGNO (folded_arg1)]))
5386 || ((p0 = lookup (folded_arg0,
5387 (safe_hash (folded_arg0, mode_arg0)
5388 % NBUCKETS), mode_arg0))
5389 && (p1 = lookup (folded_arg1,
5390 (safe_hash (folded_arg1, mode_arg0)
5391 % NBUCKETS), mode_arg0))
5392 && p0->first_same_value == p1->first_same_value)))
5393 return ((code == EQ || code == LE || code == GE
5394 || code == LEU || code == GEU)
5395 ? true : false);
5396
5397 /* If FOLDED_ARG0 is a register, see if the comparison we are
5398 doing now is either the same as we did before or the reverse
5399 (we only check the reverse if not floating-point). */
5400 else if (GET_CODE (folded_arg0) == REG)
5401 {
5402 int qty = reg_qty[REGNO (folded_arg0)];
5403
5404 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5405 && (comparison_dominates_p (qty_comparison_code[qty], code)
5406 || (comparison_dominates_p (qty_comparison_code[qty],
5407 reverse_condition (code))
5408 && ! FLOAT_MODE_P (mode_arg0)))
5409 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5410 || (const_arg1
5411 && rtx_equal_p (qty_comparison_const[qty],
5412 const_arg1))
5413 || (GET_CODE (folded_arg1) == REG
5414 && (reg_qty[REGNO (folded_arg1)]
5415 == qty_comparison_qty[qty]))))
5416 return (comparison_dominates_p (qty_comparison_code[qty],
5417 code)
5418 ? true : false);
5419 }
5420 }
5421 }
5422
5423 /* If we are comparing against zero, see if the first operand is
5424 equivalent to an IOR with a constant. If so, we may be able to
5425 determine the result of this comparison. */
5426
5427 if (const_arg1 == const0_rtx)
5428 {
5429 rtx y = lookup_as_function (folded_arg0, IOR);
5430 rtx inner_const;
5431
5432 if (y != 0
5433 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5434 && GET_CODE (inner_const) == CONST_INT
5435 && INTVAL (inner_const) != 0)
5436 {
5437 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5438 		  int has_sign = (HOST_BITS_PER_WIDE_INT > sign_bitnum
5439 && (INTVAL (inner_const)
5440 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5441 rtx true = const_true_rtx, false = const0_rtx;
5442
5443 #ifdef FLOAT_STORE_FLAG_VALUE
5444 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5445 {
5446 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5447 mode);
5448 false = CONST0_RTX (mode);
5449 }
5450 #endif
5451
5452 switch (code)
5453 {
5454 case EQ:
5455 return false;
5456 case NE:
5457 return true;
5458 case LT: case LE:
5459 if (has_sign)
5460 return true;
5461 break;
5462 case GT: case GE:
5463 if (has_sign)
5464 return false;
5465 break;
5466 }
5467 }
5468 }
5469
5470 new = simplify_relational_operation (code, mode_arg0,
5471 const_arg0 ? const_arg0 : folded_arg0,
5472 const_arg1 ? const_arg1 : folded_arg1);
5473 #ifdef FLOAT_STORE_FLAG_VALUE
5474 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5475 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5476 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5477 #endif
5478 break;
5479
5480 case '2':
5481 case 'c':
5482 switch (code)
5483 {
5484 case PLUS:
5485 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5486 with that LABEL_REF as its second operand. If so, the result is
5487 the first operand of that MINUS. This handles switches with an
5488 ADDR_DIFF_VEC table. */
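	  /* For example, if FOLDED_ARG0 is known to be equivalent to
	     (minus (label_ref L2) (label_ref L1)) and CONST_ARG1 is
	     (label_ref L1), the sum folds to (label_ref L2).  */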
5489 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5490 {
5491 rtx y
5492 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5493 : lookup_as_function (folded_arg0, MINUS);
5494
5495 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5496 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5497 return XEXP (y, 0);
5498
5499 /* Now try for a CONST of a MINUS like the above. */
5500 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5501 : lookup_as_function (folded_arg0, CONST))) != 0
5502 && GET_CODE (XEXP (y, 0)) == MINUS
5503 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5504 		  && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0))
5505 return XEXP (XEXP (y, 0), 0);
5506 }
5507
5508 /* Likewise if the operands are in the other order. */
5509 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5510 {
5511 rtx y
5512 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5513 : lookup_as_function (folded_arg1, MINUS);
5514
5515 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5516 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5517 return XEXP (y, 0);
5518
5519 /* Now try for a CONST of a MINUS like the above. */
5520 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5521 : lookup_as_function (folded_arg1, CONST))) != 0
5522 && GET_CODE (XEXP (y, 0)) == MINUS
5523 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5524 		  && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0))
5525 return XEXP (XEXP (y, 0), 0);
5526 }
5527
5528 /* If second operand is a register equivalent to a negative
5529 CONST_INT, see if we can find a register equivalent to the
5530 positive constant. Make a MINUS if so. Don't do this for
5531 a negative constant since we might then alternate between
5532 	     choosing positive and negative constants. Having the positive
5533 constant previously-used is the more common case. */
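	  /* For example, if the register is known to hold (const_int -4)
	     and some other register is known to hold (const_int 4), we
	     rewrite (plus X REG) as (minus X OTHER-REG).  */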
5534 if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
5535 && INTVAL (const_arg1) < 0 && GET_CODE (folded_arg1) == REG)
5536 {
5537 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5538 struct table_elt *p
5539 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5540 mode);
5541
5542 if (p)
5543 for (p = p->first_same_value; p; p = p->next_same_value)
5544 if (GET_CODE (p->exp) == REG)
5545 return cse_gen_binary (MINUS, mode, folded_arg0,
5546 canon_reg (p->exp, NULL_RTX));
5547 }
5548 goto from_plus;
5549
5550 case MINUS:
5551 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5552 If so, produce (PLUS Z C2-C). */
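	  /* E.g., if Y is equivalent to (plus Z (const_int 10)) and C is 4,
	     (minus Y (const_int 4)) folds to (plus Z (const_int 6)).  */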
5553 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5554 {
5555 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5556 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5557 return fold_rtx (plus_constant (copy_rtx (y),
5558 -INTVAL (const_arg1)),
5559 NULL_RTX);
5560 }
5561
5562 /* ... fall through ... */
5563
5564 from_plus:
5565 case SMIN: case SMAX: case UMIN: case UMAX:
5566 case IOR: case AND: case XOR:
5567 case MULT: case DIV: case UDIV:
5568 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5569 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5570 is known to be of similar form, we may be able to replace the
5571 operation with a combined operation. This may eliminate the
5572 intermediate operation if every use is simplified in this way.
5573 Note that the similar optimization done by combine.c only works
5574 if the intermediate operation's result has only one reference. */
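	  /* For example, if REG is known to hold (ashift X (const_int 2)),
	     then (ashift REG (const_int 3)) can be rewritten as
	     (ashift X (const_int 5)), composing the two shift counts.  */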
5575
5576 if (GET_CODE (folded_arg0) == REG
5577 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5578 {
5579 int is_shift
5580 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5581 rtx y = lookup_as_function (folded_arg0, code);
5582 rtx inner_const;
5583 enum rtx_code associate_code;
5584 rtx new_const;
5585
5586 if (y == 0
5587 || 0 == (inner_const
5588 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5589 || GET_CODE (inner_const) != CONST_INT
5590 /* If we have compiled a statement like
5591 "if (x == (x & mask1))", and now are looking at
5592 "x & mask2", we will have a case where the first operand
5593 of Y is the same as our first operand. Unless we detect
5594 this case, an infinite loop will result. */
5595 || XEXP (y, 0) == folded_arg0)
5596 break;
5597
5598 /* Don't associate these operations if they are a PLUS with the
5599 same constant and it is a power of two. These might be doable
5600 with a pre- or post-increment. Similarly for two subtracts of
5601 identical powers of two with post decrement. */
5602
5603 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5604 && (0
5605 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5606 || exact_log2 (INTVAL (const_arg1)) >= 0
5607 #endif
5608 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5609 || exact_log2 (- INTVAL (const_arg1)) >= 0
5610 #endif
5611 ))
5612 break;
5613
5614 /* Compute the code used to compose the constants. For example,
5615 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5616
5617 associate_code
5618 = (code == MULT || code == DIV || code == UDIV ? MULT
5619 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5620
5621 new_const = simplify_binary_operation (associate_code, mode,
5622 const_arg1, inner_const);
5623
5624 if (new_const == 0)
5625 break;
5626
5627 /* If we are associating shift operations, don't let this
5628 produce a shift of the size of the object or larger.
5629 This could occur when we follow a sign-extend by a right
5630 shift on a machine that does a sign-extend as a pair
5631 of shifts. */
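	      /* E.g., composing (ashiftrt (ashiftrt X 24) 24) in SImode
		 gives a count of 48; for ASHIFTRT we use 31 instead, which
		 has the same effect, and otherwise we give up.  */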
5632
5633 if (is_shift && GET_CODE (new_const) == CONST_INT
5634 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5635 {
5636 /* As an exception, we can turn an ASHIFTRT of this
5637 form into a shift of the number of bits - 1. */
5638 if (code == ASHIFTRT)
5639 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5640 else
5641 break;
5642 }
5643
5644 y = copy_rtx (XEXP (y, 0));
5645
5646 /* If Y contains our first operand (the most common way this
5647 	     can happen is if Y is a MEM), we would go into an infinite
5648 loop if we tried to fold it. So don't in that case. */
5649
5650 if (! reg_mentioned_p (folded_arg0, y))
5651 y = fold_rtx (y, insn);
5652
5653 return cse_gen_binary (code, mode, y, new_const);
5654 }
5655 }
5656
5657 new = simplify_binary_operation (code, mode,
5658 const_arg0 ? const_arg0 : folded_arg0,
5659 const_arg1 ? const_arg1 : folded_arg1);
5660 break;
5661
5662 case 'o':
5663 /* (lo_sum (high X) X) is simply X. */
5664 if (code == LO_SUM && const_arg0 != 0
5665 && GET_CODE (const_arg0) == HIGH
5666 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5667 return const_arg1;
5668 break;
5669
5670 case '3':
5671 case 'b':
5672 new = simplify_ternary_operation (code, mode, mode_arg0,
5673 const_arg0 ? const_arg0 : folded_arg0,
5674 const_arg1 ? const_arg1 : folded_arg1,
5675 const_arg2 ? const_arg2 : XEXP (x, 2));
5676 break;
5677 }
5678
5679 return new ? new : x;
5680 }
5681 \f
5682 /* Return a constant value currently equivalent to X.
5683 Return 0 if we don't know one. */
5684
5685 static rtx
5686 equiv_constant (x)
5687 rtx x;
5688 {
5689 if (GET_CODE (x) == REG
5690 && REGNO_QTY_VALID_P (REGNO (x))
5691 && qty_const[reg_qty[REGNO (x)]])
5692 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5693
5694 if (x != 0 && CONSTANT_P (x))
5695 return x;
5696
5697 /* If X is a MEM, try to fold it outside the context of any insn to see if
5698 it might be equivalent to a constant. That handles the case where it
5699 is a constant-pool reference. Then try to look it up in the hash table
5700 in case it is something whose value we have seen before. */
5701
5702 if (GET_CODE (x) == MEM)
5703 {
5704 struct table_elt *elt;
5705
5706 x = fold_rtx (x, NULL_RTX);
5707 if (CONSTANT_P (x))
5708 return x;
5709
5710 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5711 if (elt == 0)
5712 return 0;
5713
5714 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5715 if (elt->is_const && CONSTANT_P (elt->exp))
5716 return elt->exp;
5717 }
5718
5719 return 0;
5720 }
5721 \f
5722 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5723 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5724 least-significant part of X.
5725 MODE specifies how big a part of X to return.
5726
5727 If the requested operation cannot be done, 0 is returned.
5728
5729 This is similar to gen_lowpart in emit-rtl.c. */
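/* For example, on a little-endian machine this can rewrite (mem:SI addr)
   in QImode as (mem:QI addr); on a big-endian machine the address is
   adjusted so that the address just past the data is unchanged.  */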
5730
5731 rtx
5732 gen_lowpart_if_possible (mode, x)
5733 enum machine_mode mode;
5734 register rtx x;
5735 {
5736 rtx result = gen_lowpart_common (mode, x);
5737
5738 if (result)
5739 return result;
5740 else if (GET_CODE (x) == MEM)
5741 {
5742 /* This is the only other case we handle. */
5743 register int offset = 0;
5744 rtx new;
5745
5746 if (WORDS_BIG_ENDIAN)
5747 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5748 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5749 if (BYTES_BIG_ENDIAN)
5750 /* Adjust the address so that the address-after-the-data is
5751 unchanged. */
5752 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5753 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
5754 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5755 if (! memory_address_p (mode, XEXP (new, 0)))
5756 return 0;
5757 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5758 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5759 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5760 return new;
5761 }
5762 else
5763 return 0;
5764 }
5765 \f
5766 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5767 branch. It will be zero if not.
5768
5769 In certain cases, this can cause us to add an equivalence. For example,
5770 if we are following the taken case of
5771 if (i == 2)
5772 we can add the fact that `i' and '2' are now equivalent.
5773
5774 In any case, we can record that this comparison was passed. If the same
5775 comparison is seen later, we will know its value. */
5776
5777 static void
5778 record_jump_equiv (insn, taken)
5779 rtx insn;
5780 int taken;
5781 {
5782 int cond_known_true;
5783 rtx op0, op1;
5784 enum machine_mode mode, mode0, mode1;
5785 int reversed_nonequality = 0;
5786 enum rtx_code code;
5787
5788 /* Ensure this is the right kind of insn. */
5789 if (! condjump_p (insn) || simplejump_p (insn))
5790 return;
5791
5792 /* See if this jump condition is known true or false. */
5793 if (taken)
5794 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5795 else
5796 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
5797
5798 /* Get the type of comparison being done and the operands being compared.
5799 If we had to reverse a non-equality condition, record that fact so we
5800 know that it isn't valid for floating-point. */
5801 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5802 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5803 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5804
5805 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5806 if (! cond_known_true)
5807 {
5808 reversed_nonequality = (code != EQ && code != NE);
5809 code = reverse_condition (code);
5810 }
5811
5812 /* The mode is the mode of the non-constant. */
5813 mode = mode0;
5814 if (mode1 != VOIDmode)
5815 mode = mode1;
5816
5817 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5818 }
5819
5820 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5821 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5822 Make any useful entries we can with that information. Called from
5823 above function and called recursively. */
5824
5825 static void
5826 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5827 enum rtx_code code;
5828 enum machine_mode mode;
5829 rtx op0, op1;
5830 int reversed_nonequality;
5831 {
5832 unsigned op0_hash, op1_hash;
5833 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5834 struct table_elt *op0_elt, *op1_elt;
5835
5836 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5837 we know that they are also equal in the smaller mode (this is also
5838 true for all smaller modes whether or not there is a SUBREG, but
5839 	 is not worth testing for with no SUBREG). */
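   /* For example, if (subreg:DI (reg:SI R) 0) is known equal to Y, then
      (reg:SI R) is equal to the SImode low part of Y.  */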
5840
5841 /* Note that GET_MODE (op0) may not equal MODE. */
5842 if (code == EQ && GET_CODE (op0) == SUBREG
5843 && (GET_MODE_SIZE (GET_MODE (op0))
5844 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5845 {
5846 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5847 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5848
5849 record_jump_cond (code, mode, SUBREG_REG (op0),
5850 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5851 reversed_nonequality);
5852 }
5853
5854 if (code == EQ && GET_CODE (op1) == SUBREG
5855 && (GET_MODE_SIZE (GET_MODE (op1))
5856 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5857 {
5858 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5859 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5860
5861 record_jump_cond (code, mode, SUBREG_REG (op1),
5862 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5863 reversed_nonequality);
5864 }
5865
5866 /* Similarly, if this is an NE comparison, and either is a SUBREG
5867 making a smaller mode, we know the whole thing is also NE. */
5868
5869 /* Note that GET_MODE (op0) may not equal MODE;
5870 if we test MODE instead, we can get an infinite recursion
5871 alternating between two modes each wider than MODE. */
5872
5873 if (code == NE && GET_CODE (op0) == SUBREG
5874 && subreg_lowpart_p (op0)
5875 && (GET_MODE_SIZE (GET_MODE (op0))
5876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5877 {
5878 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5879 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5880
5881 record_jump_cond (code, mode, SUBREG_REG (op0),
5882 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5883 reversed_nonequality);
5884 }
5885
5886 if (code == NE && GET_CODE (op1) == SUBREG
5887 && subreg_lowpart_p (op1)
5888 && (GET_MODE_SIZE (GET_MODE (op1))
5889 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5890 {
5891 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5892 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5893
5894 record_jump_cond (code, mode, SUBREG_REG (op1),
5895 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5896 reversed_nonequality);
5897 }
5898
5899 /* Hash both operands. */
5900
5901 do_not_record = 0;
5902 hash_arg_in_memory = 0;
5903 hash_arg_in_struct = 0;
5904 op0_hash = HASH (op0, mode);
5905 op0_in_memory = hash_arg_in_memory;
5906 op0_in_struct = hash_arg_in_struct;
5907
5908 if (do_not_record)
5909 return;
5910
5911 do_not_record = 0;
5912 hash_arg_in_memory = 0;
5913 hash_arg_in_struct = 0;
5914 op1_hash = HASH (op1, mode);
5915 op1_in_memory = hash_arg_in_memory;
5916 op1_in_struct = hash_arg_in_struct;
5917
5918 if (do_not_record)
5919 return;
5920
5921 /* Look up both operands. */
5922 op0_elt = lookup (op0, op0_hash, mode);
5923 op1_elt = lookup (op1, op1_hash, mode);
5924
5925 /* If both operands are already equivalent or if they are not in the
5926 table but are identical, do nothing. */
5927 if ((op0_elt != 0 && op1_elt != 0
5928 && op0_elt->first_same_value == op1_elt->first_same_value)
5929 || op0 == op1 || rtx_equal_p (op0, op1))
5930 return;
5931
5932 /* If we aren't setting two things equal all we can do is save this
5933 comparison. Similarly if this is floating-point. In the latter
5934 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
5935 If we record the equality, we might inadvertently delete code
5936 whose intent was to change -0 to +0. */
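  /* E.g., after the taken branch of `if (x == 0.0)', x may really be
     -0.0; recording x as equal to 0.0 could let a later `x = 0.0' store
     be deleted, losing the change from -0 to +0.  */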
5937
5938 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
5939 {
5940 /* If we reversed a floating-point comparison, if OP0 is not a
5941 	 register, or if OP1 is neither a register nor a constant, we can't
5942 do anything. */
5943
5944 if (GET_CODE (op1) != REG)
5945 op1 = equiv_constant (op1);
5946
5947 if ((reversed_nonequality && FLOAT_MODE_P (mode))
5948 || GET_CODE (op0) != REG || op1 == 0)
5949 return;
5950
5951 /* Put OP0 in the hash table if it isn't already. This gives it a
5952 new quantity number. */
5953 if (op0_elt == 0)
5954 {
5955 if (insert_regs (op0, NULL_PTR, 0))
5956 {
5957 rehash_using_reg (op0);
5958 op0_hash = HASH (op0, mode);
5959
5960 /* If OP0 is contained in OP1, this changes its hash code
5961 as well. Faster to rehash than to check, except
5962 for the simple case of a constant. */
5963 if (! CONSTANT_P (op1))
5964 		op1_hash = HASH (op1, mode);
5965 }
5966
5967 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
5968 op0_elt->in_memory = op0_in_memory;
5969 op0_elt->in_struct = op0_in_struct;
5970 }
5971
5972 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
5973 if (GET_CODE (op1) == REG)
5974 {
5975 /* Look it up again--in case op0 and op1 are the same. */
5976 op1_elt = lookup (op1, op1_hash, mode);
5977
5978 /* Put OP1 in the hash table so it gets a new quantity number. */
5979 if (op1_elt == 0)
5980 {
5981 if (insert_regs (op1, NULL_PTR, 0))
5982 {
5983 rehash_using_reg (op1);
5984 op1_hash = HASH (op1, mode);
5985 }
5986
5987 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
5988 op1_elt->in_memory = op1_in_memory;
5989 op1_elt->in_struct = op1_in_struct;
5990 }
5991
5992 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
5993 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
5994 }
5995 else
5996 {
5997 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
5998 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
5999 }
6000
6001 return;
6002 }
6003
6004 /* If either side is still missing an equivalence, make it now,
6005 then merge the equivalences. */
6006
6007 if (op0_elt == 0)
6008 {
6009 if (insert_regs (op0, NULL_PTR, 0))
6010 {
6011 rehash_using_reg (op0);
6012 op0_hash = HASH (op0, mode);
6013 }
6014
6015 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6016 op0_elt->in_memory = op0_in_memory;
6017 op0_elt->in_struct = op0_in_struct;
6018 }
6019
6020 if (op1_elt == 0)
6021 {
6022 if (insert_regs (op1, NULL_PTR, 0))
6023 {
6024 rehash_using_reg (op1);
6025 op1_hash = HASH (op1, mode);
6026 }
6027
6028 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6029 op1_elt->in_memory = op1_in_memory;
6030 op1_elt->in_struct = op1_in_struct;
6031 }
6032
6033 merge_equiv_classes (op0_elt, op1_elt);
6034 last_jump_equiv_class = op0_elt;
6035 }
6036 \f
6037 /* CSE processing for one instruction.
6038 First simplify sources and addresses of all assignments
6039 	 in the instruction, using previously-computed equivalent values.
6040 Then install the new sources and destinations in the table
6041 of available values.
6042
6043 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
6044 the insn. */
6045
6046 /* Data on one SET contained in the instruction. */
6047
6048 struct set
6049 {
6050 /* The SET rtx itself. */
6051 rtx rtl;
6052 /* The SET_SRC of the rtx (the original value, if it is changing). */
6053 rtx src;
6054 /* The hash-table element for the SET_SRC of the SET. */
6055 struct table_elt *src_elt;
6056 /* Hash value for the SET_SRC. */
6057 unsigned src_hash;
6058 /* Hash value for the SET_DEST. */
6059 unsigned dest_hash;
6060 /* The SET_DEST, with SUBREG, etc., stripped. */
6061 rtx inner_dest;
6062 /* Place where the pointer to the INNER_DEST was found. */
6063 rtx *inner_dest_loc;
6064 /* Nonzero if the SET_SRC is in memory. */
6065 char src_in_memory;
6066 /* Nonzero if the SET_SRC is in a structure. */
6067 char src_in_struct;
6068 /* Nonzero if the SET_SRC contains something
6069 whose value cannot be predicted and understood. */
6070 char src_volatile;
6071 /* Original machine mode, in case it becomes a CONST_INT. */
6072 enum machine_mode mode;
6073 /* A constant equivalent for SET_SRC, if any. */
6074 rtx src_const;
6075 /* Hash value of constant equivalent for SET_SRC. */
6076 unsigned src_const_hash;
6077 /* Table entry for constant equivalent for SET_SRC, if any. */
6078 struct table_elt *src_const_elt;
6079 };
6080
6081 static void
6082 cse_insn (insn, in_libcall_block)
6083 rtx insn;
6084 int in_libcall_block;
6085 {
6086 register rtx x = PATTERN (insn);
6087 register int i;
6088 rtx tem;
6089 register int n_sets = 0;
6090
6091 /* Records what this insn does to set CC0. */
6092 rtx this_insn_cc0 = 0;
6093 enum machine_mode this_insn_cc0_mode;
6094 struct write_data writes_memory;
6095 static struct write_data init = {0, 0, 0, 0};
6096
6097 rtx src_eqv = 0;
6098 struct table_elt *src_eqv_elt = 0;
6099 int src_eqv_volatile;
6100 int src_eqv_in_memory;
6101 int src_eqv_in_struct;
6102 unsigned src_eqv_hash;
6103
6104 struct set *sets;
6105
6106 this_insn = insn;
6107 writes_memory = init;
6108
6109 /* Find all the SETs and CLOBBERs in this instruction.
6110 Record all the SETs in the array `set' and count them.
6111 Also determine whether there is a CLOBBER that invalidates
6112 all memory references, or all references at varying addresses. */
6113
6114 if (GET_CODE (insn) == CALL_INSN)
6115 {
6116 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6117 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6118 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6119 }
6120
6121 if (GET_CODE (x) == SET)
6122 {
6123 sets = (struct set *) alloca (sizeof (struct set));
6124 sets[0].rtl = x;
6125
6126 /* Ignore SETs that are unconditional jumps.
6127 They never need cse processing, so this does not hurt.
6128 The reason is not efficiency but rather
6129 so that we can test at the end for instructions
6130 that have been simplified to unconditional jumps
6131 and not be misled by unchanged instructions
6132 that were unconditional jumps to begin with. */
6133 if (SET_DEST (x) == pc_rtx
6134 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6135 ;
6136
6137 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6138 The hard function value register is used only once, to copy to
6139 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6140 Ensure we invalidate the destination register. On the 80386 no
6141 other code would invalidate it since it is a fixed_reg.
6142 We need not check the return of apply_change_group; see canon_reg. */
6143
6144 else if (GET_CODE (SET_SRC (x)) == CALL)
6145 {
6146 canon_reg (SET_SRC (x), insn);
6147 apply_change_group ();
6148 fold_rtx (SET_SRC (x), insn);
6149 invalidate (SET_DEST (x), VOIDmode);
6150 }
6151 else
6152 n_sets = 1;
6153 }
6154 else if (GET_CODE (x) == PARALLEL)
6155 {
6156 register int lim = XVECLEN (x, 0);
6157
6158 sets = (struct set *) alloca (lim * sizeof (struct set));
6159
6160 /* Find all regs explicitly clobbered in this insn,
6161 and ensure they are not replaced with any other regs
6162 elsewhere in this insn.
6163 When a reg that is clobbered is also used for input,
6164 we should presume that that is for a reason,
6165 and we should not substitute some other register
6166 which is not supposed to be clobbered.
6167 Therefore, this loop cannot be merged into the one below
6168 because a CALL may precede a CLOBBER and refer to the
6169 value clobbered. We must not let a canonicalization do
6170 anything in that case. */
6171 for (i = 0; i < lim; i++)
6172 {
6173 register rtx y = XVECEXP (x, 0, i);
6174 if (GET_CODE (y) == CLOBBER)
6175 {
6176 rtx clobbered = XEXP (y, 0);
6177
6178 if (GET_CODE (clobbered) == REG
6179 || GET_CODE (clobbered) == SUBREG)
6180 invalidate (clobbered, VOIDmode);
6181 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6182 || GET_CODE (clobbered) == ZERO_EXTRACT)
6183 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6184 }
6185 }
6186
6187 for (i = 0; i < lim; i++)
6188 {
6189 register rtx y = XVECEXP (x, 0, i);
6190 if (GET_CODE (y) == SET)
6191 {
6192 /* As above, we ignore unconditional jumps and call-insns and
6193 ignore the result of apply_change_group. */
6194 if (GET_CODE (SET_SRC (y)) == CALL)
6195 {
6196 canon_reg (SET_SRC (y), insn);
6197 apply_change_group ();
6198 fold_rtx (SET_SRC (y), insn);
6199 invalidate (SET_DEST (y), VOIDmode);
6200 }
6201 else if (SET_DEST (y) == pc_rtx
6202 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6203 ;
6204 else
6205 sets[n_sets++].rtl = y;
6206 }
6207 else if (GET_CODE (y) == CLOBBER)
6208 {
6209 /* If we clobber memory, take note of that,
6210 and canon the address.
6211 This does nothing when a register is clobbered
6212 because we have already invalidated the reg. */
6213 if (GET_CODE (XEXP (y, 0)) == MEM)
6214 {
6215 canon_reg (XEXP (y, 0), NULL_RTX);
6216 note_mem_written (XEXP (y, 0), &writes_memory);
6217 }
6218 }
6219 else if (GET_CODE (y) == USE
6220 && ! (GET_CODE (XEXP (y, 0)) == REG
6221 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6222 canon_reg (y, NULL_RTX);
6223 else if (GET_CODE (y) == CALL)
6224 {
6225 /* The result of apply_change_group can be ignored; see
6226 canon_reg. */
6227 canon_reg (y, insn);
6228 apply_change_group ();
6229 fold_rtx (y, insn);
6230 }
6231 }
6232 }
6233 else if (GET_CODE (x) == CLOBBER)
6234 {
6235 if (GET_CODE (XEXP (x, 0)) == MEM)
6236 {
6237 canon_reg (XEXP (x, 0), NULL_RTX);
6238 note_mem_written (XEXP (x, 0), &writes_memory);
6239 }
6240 }
6241
6242 /* Canonicalize a USE of a pseudo register or memory location. */
6243 else if (GET_CODE (x) == USE
6244 && ! (GET_CODE (XEXP (x, 0)) == REG
6245 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6246 canon_reg (XEXP (x, 0), NULL_RTX);
6247 else if (GET_CODE (x) == CALL)
6248 {
6249 /* The result of apply_change_group can be ignored; see canon_reg. */
6250 canon_reg (x, insn);
6251 apply_change_group ();
6252 fold_rtx (x, insn);
6253 }
6254
6255 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6256 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6257 is handled specially for this case, and if it isn't set, then there will
6258 be no equivalence for the destination. */
6259 if (n_sets == 1 && REG_NOTES (insn) != 0
6260 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6261 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6262 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6263 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6264
6265 /* Canonicalize sources and addresses of destinations.
6266 We do this in a separate pass to avoid problems when a MATCH_DUP is
6267 present in the insn pattern. In that case, we want to ensure that
6268 we don't break the duplicate nature of the pattern. So we will replace
6269 both operands at the same time. Otherwise, we would fail to find an
6270 equivalent substitution in the loop calling validate_change below.
6271
6272 We used to suppress canonicalization of DEST if it appears in SRC,
6273 but we don't do this any more. */
6274
6275 for (i = 0; i < n_sets; i++)
6276 {
6277 rtx dest = SET_DEST (sets[i].rtl);
6278 rtx src = SET_SRC (sets[i].rtl);
6279 rtx new = canon_reg (src, insn);
6280
6281 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6282 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6283 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6284 || insn_n_dups[recog_memoized (insn)] > 0)
6285 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6286 else
6287 SET_SRC (sets[i].rtl) = new;
6288
6289 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6290 {
6291 validate_change (insn, &XEXP (dest, 1),
6292 canon_reg (XEXP (dest, 1), insn), 1);
6293 validate_change (insn, &XEXP (dest, 2),
6294 canon_reg (XEXP (dest, 2), insn), 1);
6295 }
6296
6297 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6298 || GET_CODE (dest) == ZERO_EXTRACT
6299 || GET_CODE (dest) == SIGN_EXTRACT)
6300 dest = XEXP (dest, 0);
6301
6302 if (GET_CODE (dest) == MEM)
6303 canon_reg (dest, insn);
6304 }
6305
6306 /* Now that we have done all the replacements, we can apply the change
6307 group and see if they all work. Note that this will cause some
6308 canonicalizations that would have worked individually not to be applied
6309 because some other canonicalization didn't work, but this should not
6310 occur often.
6311
6312 The result of apply_change_group can be ignored; see canon_reg. */
6313
6314 apply_change_group ();
6315
6316 /* Set sets[i].src_elt to the class each source belongs to.
6317 Detect assignments from or to volatile things
6318 	 and set sets[i].rtl to zero so they will be ignored
6319 in the rest of this function.
6320
6321 Nothing in this loop changes the hash table or the register chains. */
6322
6323 for (i = 0; i < n_sets; i++)
6324 {
6325 register rtx src, dest;
6326 register rtx src_folded;
6327 register struct table_elt *elt = 0, *p;
6328 enum machine_mode mode;
6329 rtx src_eqv_here;
6330 rtx src_const = 0;
6331 rtx src_related = 0;
6332 struct table_elt *src_const_elt = 0;
6333 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6334 int src_related_cost = 10000, src_elt_cost = 10000;
6335 	/* Set non-zero if we need to call force_const_mem on the
6336 contents of src_folded before using it. */
6337 int src_folded_force_flag = 0;
6338
6339 dest = SET_DEST (sets[i].rtl);
6340 src = SET_SRC (sets[i].rtl);
6341
6342 /* If SRC is a constant that has no machine mode,
6343 hash it with the destination's machine mode.
6344 This way we can keep different modes separate. */
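      /* E.g., (const_int 1) itself has VOIDmode, so a load of 1 into an
	 SImode register and a load of 1 into a DImode register hash into
	 different classes.  */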
6345
6346 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6347 sets[i].mode = mode;
6348
6349 if (src_eqv)
6350 {
6351 enum machine_mode eqvmode = mode;
6352 if (GET_CODE (dest) == STRICT_LOW_PART)
6353 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6354 do_not_record = 0;
6355 hash_arg_in_memory = 0;
6356 hash_arg_in_struct = 0;
6357 src_eqv = fold_rtx (src_eqv, insn);
6358 src_eqv_hash = HASH (src_eqv, eqvmode);
6359
6360 /* Find the equivalence class for the equivalent expression. */
6361
6362 if (!do_not_record)
6363 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6364
6365 src_eqv_volatile = do_not_record;
6366 src_eqv_in_memory = hash_arg_in_memory;
6367 src_eqv_in_struct = hash_arg_in_struct;
6368 }
6369
6370 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6371 value of the INNER register, not the destination. So it is not
6372 a valid substitution for the source. But save it for later. */
6373 if (GET_CODE (dest) == STRICT_LOW_PART)
6374 src_eqv_here = 0;
6375 else
6376 src_eqv_here = src_eqv;
6377
6378 	/* Simplify any foldable subexpressions in SRC. Then get the fully-
6379 simplified result, which may not necessarily be valid. */
6380 src_folded = fold_rtx (src, insn);
6381
6382 #if 0
6383 /* ??? This caused bad code to be generated for the m68k port with -O2.
6384 Suppose src is (CONST_INT -1), and that after truncation src_folded
6385 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6386 At the end we will add src and src_const to the same equivalence
6387 class. We now have 3 and -1 on the same equivalence class. This
6388 causes later instructions to be mis-optimized. */
6389 /* If storing a constant in a bitfield, pre-truncate the constant
6390 so we will be able to record it later. */
6391 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6392 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6393 {
6394 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6395
6396 if (GET_CODE (src) == CONST_INT
6397 && GET_CODE (width) == CONST_INT
6398 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6399 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6400 src_folded
6401 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6402 << INTVAL (width)) - 1));
6403 }
6404 #endif
6405
6406 /* Compute SRC's hash code, and also notice if it
6407 should not be recorded at all. In that case,
6408 prevent any further processing of this assignment. */
6409 do_not_record = 0;
6410 hash_arg_in_memory = 0;
6411 hash_arg_in_struct = 0;
6412
6413 sets[i].src = src;
6414 sets[i].src_hash = HASH (src, mode);
6415 sets[i].src_volatile = do_not_record;
6416 sets[i].src_in_memory = hash_arg_in_memory;
6417 sets[i].src_in_struct = hash_arg_in_struct;
6418
6419 #if 0
6420 /* It is no longer clear why we used to do this, but it doesn't
6421 appear to still be needed. So let's try without it since this
6422 code hurts cse'ing widened ops. */
6423 /* If source is a perverse subreg (such as QI treated as an SI),
6424 treat it as volatile. It may do the work of an SI in one context
6425 where the extra bits are not being used, but cannot replace an SI
6426 in general. */
6427 if (GET_CODE (src) == SUBREG
6428 && (GET_MODE_SIZE (GET_MODE (src))
6429 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6430 sets[i].src_volatile = 1;
6431 #endif
6432
6433 /* Locate all possible equivalent forms for SRC. Try to replace
6434 SRC in the insn with each cheaper equivalent.
6435
6436 We have the following types of equivalents: SRC itself, a folded
6437 version, a value given in a REG_EQUAL note, or a value related
6438 to a constant.
6439
6440 Each of these equivalents may be part of an additional class
6441 of equivalents (if more than one is in the table, they must be in
6442 the same class; we check for this).
6443
6444 If the source is volatile, we don't do any table lookups.
6445
6446 We note any constant equivalent for possible later use in a
6447 REG_NOTE. */
6448
6449 if (!sets[i].src_volatile)
6450 elt = lookup (src, sets[i].src_hash, mode);
6451
6452 sets[i].src_elt = elt;
6453
6454 if (elt && src_eqv_here && src_eqv_elt)
6455 {
6456 if (elt->first_same_value != src_eqv_elt->first_same_value)
6457 {
6458 /* The REG_EQUAL is indicating that two formerly distinct
6459 classes are now equivalent. So merge them. */
6460 merge_equiv_classes (elt, src_eqv_elt);
6461 src_eqv_hash = HASH (src_eqv, elt->mode);
6462 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6463 }
6464
6465 src_eqv_here = 0;
6466 }
6467
6468 else if (src_eqv_elt)
6469 elt = src_eqv_elt;
6470
6471 /* Try to find a constant somewhere and record it in `src_const'.
6472 Record its table element, if any, in `src_const_elt'. Look in
6473 any known equivalences first. (If the constant is not in the
6474 table, also set `sets[i].src_const_hash'). */
6475 if (elt)
6476 for (p = elt->first_same_value; p; p = p->next_same_value)
6477 if (p->is_const)
6478 {
6479 src_const = p->exp;
6480 src_const_elt = elt;
6481 break;
6482 }
6483
6484 if (src_const == 0
6485 && (CONSTANT_P (src_folded)
6486 /* Consider (minus (label_ref L1) (label_ref L2)) as
6487 "constant" here so we will record it. This allows us
6488 to fold switch statements when an ADDR_DIFF_VEC is used. */
6489 || (GET_CODE (src_folded) == MINUS
6490 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6491 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6492 src_const = src_folded, src_const_elt = elt;
6493 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6494 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6495
6496 /* If we don't know if the constant is in the table, get its
6497 hash code and look it up. */
6498 if (src_const && src_const_elt == 0)
6499 {
6500 sets[i].src_const_hash = HASH (src_const, mode);
6501 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6502 }
6503
6504 sets[i].src_const = src_const;
6505 sets[i].src_const_elt = src_const_elt;
6506
6507 /* If the constant and our source are both in the table, mark them as
6508 equivalent. Otherwise, if a constant is in the table but the source
6509 isn't, set ELT to it. */
6510 if (src_const_elt && elt
6511 && src_const_elt->first_same_value != elt->first_same_value)
6512 merge_equiv_classes (elt, src_const_elt);
6513 else if (src_const_elt && elt == 0)
6514 elt = src_const_elt;
6515
6516 /* See if there is a register linearly related to a constant
6517 equivalent of SRC. */
6518 if (src_const
6519 && (GET_CODE (src_const) == CONST
6520 || (src_const_elt && src_const_elt->related_value != 0)))
6521 {
6522 src_related = use_related_value (src_const, src_const_elt);
6523 if (src_related)
6524 {
6525 struct table_elt *src_related_elt
6526 = lookup (src_related, HASH (src_related, mode), mode);
6527 if (src_related_elt && elt)
6528 {
6529 if (elt->first_same_value
6530 != src_related_elt->first_same_value)
6531 /* This can occur when we previously saw a CONST
6532 involving a SYMBOL_REF and then see the SYMBOL_REF
6533 twice. Merge the involved classes. */
6534 merge_equiv_classes (elt, src_related_elt);
6535
6536 src_related = 0;
6537 src_related_elt = 0;
6538 }
6539 else if (src_related_elt && elt == 0)
6540 elt = src_related_elt;
6541 }
6542 }
6543
6544 /* See if we have a CONST_INT that is already in a register in a
6545 wider mode. */
6546
6547 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6548 && GET_MODE_CLASS (mode) == MODE_INT
6549 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6550 {
6551 enum machine_mode wider_mode;
6552
6553 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6554 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6555 && src_related == 0;
6556 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6557 {
6558 struct table_elt *const_elt
6559 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6560
6561 if (const_elt == 0)
6562 continue;
6563
6564 for (const_elt = const_elt->first_same_value;
6565 const_elt; const_elt = const_elt->next_same_value)
6566 if (GET_CODE (const_elt->exp) == REG)
6567 {
6568 src_related = gen_lowpart_if_possible (mode,
6569 const_elt->exp);
6570 break;
6571 }
6572 }
6573 }
6574
6575 /* Another possibility is that we have an AND with a constant in
6576 a mode narrower than a word. If so, it might have been generated
6577 as part of an "if" which would narrow the AND. If we already
6578 have done the AND in a wider mode, we can use a SUBREG of that
6579 value. */
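      /* For example, if (and:SI X (const_int 15)) is already available in
	 a register and SRC is (and:QI X' (const_int 15)), where X' is the
	 QImode low part of X, we can use the QImode low part of that
	 register.  */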
6580
6581 if (flag_expensive_optimizations && ! src_related
6582 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6583 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6584 {
6585 enum machine_mode tmode;
6586 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6587
6588 for (tmode = GET_MODE_WIDER_MODE (mode);
6589 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6590 tmode = GET_MODE_WIDER_MODE (tmode))
6591 {
6592 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6593 struct table_elt *larger_elt;
6594
6595 if (inner)
6596 {
6597 PUT_MODE (new_and, tmode);
6598 XEXP (new_and, 0) = inner;
6599 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6600 if (larger_elt == 0)
6601 continue;
6602
6603 for (larger_elt = larger_elt->first_same_value;
6604 larger_elt; larger_elt = larger_elt->next_same_value)
6605 if (GET_CODE (larger_elt->exp) == REG)
6606 {
6607 src_related
6608 = gen_lowpart_if_possible (mode, larger_elt->exp);
6609 break;
6610 }
6611
6612 if (src_related)
6613 break;
6614 }
6615 }
6616 }
6617
6618 #ifdef LOAD_EXTEND_OP
6619 /* See if a MEM has already been loaded with a widening operation;
6620 if it has, we can use a subreg of that. Many CISC machines
6621 also have such operations, but this is only likely to be
6622 	 beneficial on these machines. */
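      /* For example, if (zero_extend:SI (mem:QI addr)) was loaded into a
	 register earlier, a fresh (mem:QI addr) reference can use the
	 QImode low part of that register instead of reloading.  */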
6623
6624 if (flag_expensive_optimizations && src_related == 0
6625 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6626 && GET_MODE_CLASS (mode) == MODE_INT
6627 && GET_CODE (src) == MEM && ! do_not_record
6628 && LOAD_EXTEND_OP (mode) != NIL)
6629 {
6630 enum machine_mode tmode;
6631
6632 /* Set what we are trying to extend and the operation it might
6633 have been extended with. */
6634 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6635 XEXP (memory_extend_rtx, 0) = src;
6636
6637 for (tmode = GET_MODE_WIDER_MODE (mode);
6638 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6639 tmode = GET_MODE_WIDER_MODE (tmode))
6640 {
6641 struct table_elt *larger_elt;
6642
6643 PUT_MODE (memory_extend_rtx, tmode);
6644 larger_elt = lookup (memory_extend_rtx,
6645 HASH (memory_extend_rtx, tmode), tmode);
6646 if (larger_elt == 0)
6647 continue;
6648
6649 for (larger_elt = larger_elt->first_same_value;
6650 larger_elt; larger_elt = larger_elt->next_same_value)
6651 if (GET_CODE (larger_elt->exp) == REG)
6652 {
6653 src_related = gen_lowpart_if_possible (mode,
6654 larger_elt->exp);
6655 break;
6656 }
6657
6658 if (src_related)
6659 break;
6660 }
6661 }
6662 #endif /* LOAD_EXTEND_OP */
6663
6664 if (src == src_folded)
6665 src_folded = 0;
6666
6667 /* At this point, ELT, if non-zero, points to a class of expressions
6668 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6669 and SRC_RELATED, if non-zero, each contain additional equivalent
6670 expressions. Prune these latter expressions by deleting expressions
6671 already in the equivalence class.
6672
6673 Check for an equivalent identical to the destination. If found,
6674 this is the preferred equivalent since it will likely lead to
6675 elimination of the insn. Indicate this by placing it in
6676 `src_related'. */
6677
6678 if (elt) elt = elt->first_same_value;
6679 for (p = elt; p; p = p->next_same_value)
6680 {
6681 enum rtx_code code = GET_CODE (p->exp);
6682
6683 /* If the expression is not valid, ignore it. Then we do not
6684 have to check for validity below. In most cases, we can use
6685 `rtx_equal_p', since canonicalization has already been done. */
6686 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6687 continue;
6688
6689 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6690 src = 0;
6691 else if (src_folded && GET_CODE (src_folded) == code
6692 && rtx_equal_p (src_folded, p->exp))
6693 src_folded = 0;
6694 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6695 && rtx_equal_p (src_eqv_here, p->exp))
6696 src_eqv_here = 0;
6697 else if (src_related && GET_CODE (src_related) == code
6698 && rtx_equal_p (src_related, p->exp))
6699 src_related = 0;
6700
6701 	  /* If this is the same as the destination of the insn, we want
6702 to prefer it. Copy it to src_related. The code below will
6703 then give it a negative cost. */
6704 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6705 src_related = dest;
6706
6707 }
6708
6709 /* Find the cheapest valid equivalent, trying all the available
6710 possibilities. Prefer items not in the hash table to ones
6711 that are when they are equal cost. Note that we can never
6712 worsen an insn as the current contents will also succeed.
6713 If we find an equivalent identical to the destination, use it as best,
6714 since this insn will probably be eliminated in that case. */
6715 if (src)
6716 {
6717 if (rtx_equal_p (src, dest))
6718 src_cost = -1;
6719 else
6720 src_cost = COST (src);
6721 }
6722
6723 if (src_eqv_here)
6724 {
6725 if (rtx_equal_p (src_eqv_here, dest))
6726 src_eqv_cost = -1;
6727 else
6728 src_eqv_cost = COST (src_eqv_here);
6729 }
6730
6731 if (src_folded)
6732 {
6733 if (rtx_equal_p (src_folded, dest))
6734 src_folded_cost = -1;
6735 else
6736 src_folded_cost = COST (src_folded);
6737 }
6738
6739 if (src_related)
6740 {
6741 if (rtx_equal_p (src_related, dest))
6742 src_related_cost = -1;
6743 else
6744 src_related_cost = COST (src_related);
6745 }
6746
6747 /* If this was an indirect jump insn, a known label will really be
6748 cheaper even though it looks more expensive. */
6749 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6750 src_folded = src_const, src_folded_cost = -1;
6751
6752 /* Terminate loop when replacement made. This must terminate since
6753 the current contents will be tested and will always be valid. */
6754 while (1)
6755 {
6756 rtx trial;
6757
6758 /* Skip invalid entries. */
6759 while (elt && GET_CODE (elt->exp) != REG
6760 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6761 elt = elt->next_same_value;
6762
6763 if (elt) src_elt_cost = elt->cost;
6764
6765 /* Find cheapest and skip it for the next time. For items
6766 of equal cost, use this order:
6767 src_folded, src, src_eqv, src_related and hash table entry. */
6768 if (src_folded_cost <= src_cost
6769 && src_folded_cost <= src_eqv_cost
6770 && src_folded_cost <= src_related_cost
6771 && src_folded_cost <= src_elt_cost)
6772 {
6773 trial = src_folded, src_folded_cost = 10000;
6774 if (src_folded_force_flag)
6775 trial = force_const_mem (mode, trial);
6776 }
6777 else if (src_cost <= src_eqv_cost
6778 && src_cost <= src_related_cost
6779 && src_cost <= src_elt_cost)
6780 trial = src, src_cost = 10000;
6781 else if (src_eqv_cost <= src_related_cost
6782 && src_eqv_cost <= src_elt_cost)
6783 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6784 else if (src_related_cost <= src_elt_cost)
6785 trial = copy_rtx (src_related), src_related_cost = 10000;
6786 else
6787 {
6788 trial = copy_rtx (elt->exp);
6789 elt = elt->next_same_value;
6790 src_elt_cost = 10000;
6791 }
6792
6793 /* We don't normally have an insn matching (set (pc) (pc)), so
6794 check for this separately here. We will delete such an
6795 insn below.
6796
6797 Tablejump insns contain a USE of the table, so simply replacing
6798 the operand with the constant won't match. This is simply an
6799 unconditional branch, however, and is therefore valid. Just
6800 insert the substitution here and we will delete and re-emit
6801 the insn later. */
6802
6803 if (n_sets == 1 && dest == pc_rtx
6804 && (trial == pc_rtx
6805 || (GET_CODE (trial) == LABEL_REF
6806 && ! condjump_p (insn))))
6807 {
6808 /* If TRIAL is a label in front of a jump table, we are
6809 really falling through the switch (this is how casesi
6810 insns work), so we must branch around the table. */
6811 if (GET_CODE (trial) == CODE_LABEL
6812 && NEXT_INSN (trial) != 0
6813 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6814 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6815 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6816
6817 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6818
6819 SET_SRC (sets[i].rtl) = trial;
6820 cse_jumps_altered = 1;
6821 break;
6822 }
6823
6824 /* Look for a substitution that makes a valid insn. */
6825 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6826 {
6827 /* The result of apply_change_group can be ignored; see
6828 canon_reg. */
6829
6830 validate_change (insn, &SET_SRC (sets[i].rtl),
6831 canon_reg (SET_SRC (sets[i].rtl), insn),
6832 1);
6833 apply_change_group ();
6834 break;
6835 }
6836
6837 /* If we previously found constant pool entries for
6838 constants and this is a constant, try making a
6839 pool entry. Put it in src_folded unless we already have done
6840 this since that is where it likely came from. */
6841
6842 else if (constant_pool_entries_cost
6843 && CONSTANT_P (trial)
6844 && ! (GET_CODE (trial) == CONST
6845 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
6846 && (src_folded == 0
6847 || (GET_CODE (src_folded) != MEM
6848 && ! src_folded_force_flag))
6849 && GET_MODE_CLASS (mode) != MODE_CC)
6850 {
6851 src_folded_force_flag = 1;
6852 src_folded = trial;
6853 src_folded_cost = constant_pool_entries_cost;
6854 }
6855 }
6856
6857 src = SET_SRC (sets[i].rtl);
6858
6859 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6860 However, there is an important exception: If both are registers
6861 that are not the head of their equivalence class, replace SET_SRC
6862 with the head of the class. If we do not do this, we will have
6863 both registers live over a portion of the basic block. This way,
6864 their lifetimes will likely abut instead of overlapping. */
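      /* E.g., if regs 100 and 101 are equivalent and 100 heads the class,
	 (set (reg 101) (reg 101)) becomes (set (reg 101) (reg 100)), so
	 reg 101's lifetime can begin where reg 100's ends.  */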
6865 if (GET_CODE (dest) == REG
6866 && REGNO_QTY_VALID_P (REGNO (dest))
6867 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6868 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6869 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6870 /* Don't do this if the original insn had a hard reg as
6871 SET_SRC. */
6872 && (GET_CODE (sets[i].src) != REG
6873 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6874 /* We can't call canon_reg here because it won't do anything if
6875 SRC is a hard register. */
6876 {
6877 int first = qty_first_reg[reg_qty[REGNO (src)]];
6878
6879 src = SET_SRC (sets[i].rtl)
6880 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6881 : gen_rtx (REG, GET_MODE (src), first);
6882
6883 /* If we had a constant that is cheaper than what we are now
6884 setting SRC to, use that constant. We ignored it when we
6885 thought we could make this into a no-op. */
6886 if (src_const && COST (src_const) < COST (src)
6887 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6888 src = src_const;
6889 }
6890
6891 /* If we made a change, recompute SRC values. */
6892 if (src != sets[i].src)
6893 {
6894 do_not_record = 0;
6895 hash_arg_in_memory = 0;
6896 hash_arg_in_struct = 0;
6897 sets[i].src = src;
6898 sets[i].src_hash = HASH (src, mode);
6899 sets[i].src_volatile = do_not_record;
6900 sets[i].src_in_memory = hash_arg_in_memory;
6901 sets[i].src_in_struct = hash_arg_in_struct;
6902 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
6903 }
6904
6905 /* If this is a single SET, we are setting a register, and we have an
6906 equivalent constant, we want to add a REG_NOTE. We don't want
6907 to write a REG_EQUAL note for a constant pseudo since verifying that
6908 that pseudo hasn't been eliminated is a pain. Such a note also
6909 won't help anything. */
6910 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6911 && GET_CODE (src_const) != REG)
6912 {
6913 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6914
6915 /* Record the actual constant value in a REG_EQUAL note, making
6916 a new one if one does not already exist. */
6917 if (tem)
6918 XEXP (tem, 0) = src_const;
6919 else
6920 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
6921 src_const, REG_NOTES (insn));
6922
6923 /* If storing a constant value in a register that
6924 previously held the constant value 0,
6925 record this fact with a REG_WAS_0 note on this insn.
6926
6927 Note that the *register* is required to have previously held 0,
6928 not just any register in the quantity and we must point to the
6929 insn that set that register to zero.
6930
6931 Rather than track each register individually, we just see if
6932 the last set for this quantity was for this register. */
6933
6934 if (REGNO_QTY_VALID_P (REGNO (dest))
6935 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
6936 {
6937 /* See if we previously had a REG_WAS_0 note. */
6938 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
6939 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
6940
6941 if ((tem = single_set (const_insn)) != 0
6942 && rtx_equal_p (SET_DEST (tem), dest))
6943 {
6944 if (note)
6945 XEXP (note, 0) = const_insn;
6946 else
6947 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
6948 const_insn, REG_NOTES (insn));
6949 }
6950 }
6951 }
6952
6953 /* Now deal with the destination. */
6954 do_not_record = 0;
6955       sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
6956
6957 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
6958 to the MEM or REG within it. */
6959 while (GET_CODE (dest) == SIGN_EXTRACT
6960 || GET_CODE (dest) == ZERO_EXTRACT
6961 || GET_CODE (dest) == SUBREG
6962 || GET_CODE (dest) == STRICT_LOW_PART)
6963 {
6964 sets[i].inner_dest_loc = &XEXP (dest, 0);
6965 dest = XEXP (dest, 0);
6966 }
6967
6968 sets[i].inner_dest = dest;
6969
6970 if (GET_CODE (dest) == MEM)
6971 {
6972 dest = fold_rtx (dest, insn);
6973
6974 /* Decide whether we invalidate everything in memory,
6975 or just things at non-fixed places.
6976 Writing a large aggregate must invalidate everything
6977 because we don't know how long it is. */
6978 note_mem_written (dest, &writes_memory);
6979 }
6980
6981 /* Compute the hash code of the destination now,
6982 before the effects of this instruction are recorded,
6983 since the register values used in the address computation
6984 are those before this instruction. */
6985 sets[i].dest_hash = HASH (dest, mode);
6986
6987 /* Don't enter a bit-field in the hash table
6988 because the value in it after the store
6989 may not equal what was stored, due to truncation. */
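      /* E.g., storing (const_int 5) into a two-bit ZERO_EXTRACT leaves the
	 field holding 1, so recording the destination as equivalent to 5
	 would be wrong.  */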
6990
6991 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6992 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6993 {
6994 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6995
6996 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
6997 && GET_CODE (width) == CONST_INT
6998 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6999 && ! (INTVAL (src_const)
7000 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7001 /* Exception: if the value is constant,
7002 and it won't be truncated, record it. */
7003 ;
7004 else
7005 {
7006 /* This is chosen so that the destination will be invalidated
7007 but no new value will be recorded.
7008 We must invalidate because sometimes constant
7009 values can be recorded for bitfields. */
7010 sets[i].src_elt = 0;
7011 sets[i].src_volatile = 1;
7012 src_eqv = 0;
7013 src_eqv_elt = 0;
7014 }
7015 }
7016
7017 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7018 the insn. */
7019 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7020 {
7021 PUT_CODE (insn, NOTE);
7022 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7023 NOTE_SOURCE_FILE (insn) = 0;
7024 cse_jumps_altered = 1;
7025 /* One less use of the label this insn used to jump to. */
7026 --LABEL_NUSES (JUMP_LABEL (insn));
7027 /* No more processing for this set. */
7028 sets[i].rtl = 0;
7029 }
7030
7031 /* If this SET is now setting PC to a label, we know it used to
7032 be a conditional or computed branch. So we see if we can follow
7033 it. If it was a computed branch, delete it and re-emit. */
7034 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7035 {
7036 rtx p;
7037
7038 /* If this is not in the format for a simple branch and
7039 	     this is the only SET in it, re-emit it. */
7040 if (! simplejump_p (insn) && n_sets == 1)
7041 {
7042 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7043 JUMP_LABEL (new) = XEXP (src, 0);
7044 LABEL_NUSES (XEXP (src, 0))++;
7045 delete_insn (insn);
7046 insn = new;
7047 }
7048 else
7049 /* Otherwise, force rerecognition, since it probably had
7050 a different pattern before.
7051 This shouldn't really be necessary, since whatever
7052 changed the source value above should have done this.
7053 Until the right place is found, might as well do this here. */
7054 INSN_CODE (insn) = -1;
7055
7056 /* Now that we've converted this jump to an unconditional jump,
7057 there is dead code after it. Delete the dead code until we
7058 reach a BARRIER, the end of the function, or a label. Do
7059 not delete NOTEs except for NOTE_INSN_DELETED since later
7060 phases assume these notes are retained. */
7061
7062 p = insn;
7063
7064 while (NEXT_INSN (p) != 0
7065 && GET_CODE (NEXT_INSN (p)) != BARRIER
7066 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7067 {
7068 if (GET_CODE (NEXT_INSN (p)) != NOTE
7069 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7070 delete_insn (NEXT_INSN (p));
7071 else
7072 p = NEXT_INSN (p);
7073 }
7074
7075 /* If we don't have a BARRIER immediately after INSN, put one there.
7076 Much code assumes that there are no NOTEs between a JUMP_INSN and
7077 a BARRIER. */
7078
7079 if (NEXT_INSN (insn) == 0
7080 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7081 emit_barrier_after (insn);
7082
7083 /* We might have two BARRIERs separated by notes. Delete the second
7084 one if so. */
7085
7086 if (p != insn && NEXT_INSN (p) != 0
7087 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7088 delete_insn (NEXT_INSN (p));
7089
7090 cse_jumps_altered = 1;
7091 sets[i].rtl = 0;
7092 }
7093
7094 /* If destination is volatile, invalidate it and then do no further
7095 processing for this assignment. */
7096
7097 else if (do_not_record)
7098 {
7099 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7100 || GET_CODE (dest) == MEM)
7101 invalidate (dest, VOIDmode);
7102 else if (GET_CODE (dest) == STRICT_LOW_PART
7103 || GET_CODE (dest) == ZERO_EXTRACT)
7104 invalidate (XEXP (dest, 0), GET_MODE (dest));
7105 sets[i].rtl = 0;
7106 }
7107
7108 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7109 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7110
7111 #ifdef HAVE_cc0
7112 /* If setting CC0, record what it was set to, or a constant, if it
7113 is equivalent to a constant. If it is being set to a floating-point
7114 value, make a COMPARE with the appropriate constant of 0. If we
7115 don't do this, later code can interpret this as a test against
7116 const0_rtx, which can cause problems if we try to put it into an
7117 insn as a floating-point operand. */
7118 if (dest == cc0_rtx)
7119 {
7120 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7121 this_insn_cc0_mode = mode;
7122 if (FLOAT_MODE_P (mode))
7123 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7124 CONST0_RTX (mode));
7125 }
7126 #endif
7127 }
7128
7129 /* Now enter all non-volatile source expressions in the hash table
7130 if they are not already present.
7131 Record their equivalence classes in src_elt.
7132 This way we can insert the corresponding destinations into
7133 the same classes even if the actual sources are no longer in them
7134 (having been invalidated). */
7135
7136 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7137 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7138 {
7139 register struct table_elt *elt;
7140 register struct table_elt *classp = sets[0].src_elt;
7141 rtx dest = SET_DEST (sets[0].rtl);
7142 enum machine_mode eqvmode = GET_MODE (dest);
7143
7144 if (GET_CODE (dest) == STRICT_LOW_PART)
7145 {
7146 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7147 classp = 0;
7148 }
7149 if (insert_regs (src_eqv, classp, 0))
7150 {
7151 rehash_using_reg (src_eqv);
7152 src_eqv_hash = HASH (src_eqv, eqvmode);
7153 }
7154 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7155 elt->in_memory = src_eqv_in_memory;
7156 elt->in_struct = src_eqv_in_struct;
7157 src_eqv_elt = elt;
7158
7159 /* Check to see if src_eqv_elt is the same as a set source which
7160 does not yet have an elt, and if so set the elt of the set source
7161 to src_eqv_elt. */
7162 for (i = 0; i < n_sets; i++)
7163 if (sets[i].rtl && sets[i].src_elt == 0
7164 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7165 sets[i].src_elt = src_eqv_elt;
7166 }
7167
7168 for (i = 0; i < n_sets; i++)
7169 if (sets[i].rtl && ! sets[i].src_volatile
7170 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7171 {
7172 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7173 {
7174 /* REG_EQUAL in setting a STRICT_LOW_PART
7175 gives an equivalent for the entire destination register,
7176 not just for the subreg being stored in now.
7177 This is a more interesting equivalence, so we arrange later
7178 to treat the entire reg as the destination. */
7179 sets[i].src_elt = src_eqv_elt;
7180 sets[i].src_hash = src_eqv_hash;
7181 }
7182 else
7183 {
7184 /* Insert source and constant equivalent into hash table, if not
7185 already present. */
7186 register struct table_elt *classp = src_eqv_elt;
7187 register rtx src = sets[i].src;
7188 register rtx dest = SET_DEST (sets[i].rtl);
7189 enum machine_mode mode
7190 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7191
7192 if (sets[i].src_elt == 0)
7193 {
7194 register struct table_elt *elt;
7195
7196 /* Note that these insert_regs calls cannot remove
7197 any of the src_elt's, because they would have failed to
7198 match if not still valid. */
7199 if (insert_regs (src, classp, 0))
7200 {
7201 rehash_using_reg (src);
7202 sets[i].src_hash = HASH (src, mode);
7203 }
7204 elt = insert (src, classp, sets[i].src_hash, mode);
7205 elt->in_memory = sets[i].src_in_memory;
7206 elt->in_struct = sets[i].src_in_struct;
7207 sets[i].src_elt = classp = elt;
7208 }
7209
7210 if (sets[i].src_const && sets[i].src_const_elt == 0
7211 && src != sets[i].src_const
7212 && ! rtx_equal_p (sets[i].src_const, src))
7213 sets[i].src_elt = insert (sets[i].src_const, classp,
7214 sets[i].src_const_hash, mode);
7215 }
7216 }
7217 else if (sets[i].src_elt == 0)
7218 /* If we did not insert the source into the hash table (e.g., it was
7219 volatile), note the equivalence class for the REG_EQUAL value, if any,
7220 so that the destination goes into that class. */
7221 sets[i].src_elt = src_eqv_elt;
7222
7223 invalidate_from_clobbers (&writes_memory, x);
7224
7225 /* Some registers are invalidated by subroutine calls. Memory is
7226 invalidated by non-constant calls. */
7227
7228 if (GET_CODE (insn) == CALL_INSN)
7229 {
7230 static struct write_data everything = {0, 1, 1, 1};
7231
7232 if (! CONST_CALL_P (insn))
7233 invalidate_memory (&everything);
7234 invalidate_for_call ();
7235 }
7236
7237 /* Now invalidate everything set by this instruction.
7238 If a SUBREG or other funny destination is being set,
7239 sets[i].rtl is still nonzero, so here we invalidate the reg
7240 a part of which is being set. */
7241
7242 for (i = 0; i < n_sets; i++)
7243 if (sets[i].rtl)
7244 {
7245 /* We can't use the inner dest, because the mode associated with
7246 a ZERO_EXTRACT is significant. */
7247 register rtx dest = SET_DEST (sets[i].rtl);
7248
7249 /* Needed for registers to remove the register from its
7250 previous quantity's chain.
7251 Needed for memory if this is a nonvarying address, unless
7252 we have just done an invalidate_memory that covers even those. */
7253 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7254 || (GET_CODE (dest) == MEM && ! writes_memory.all
7255 && ! cse_rtx_addr_varies_p (dest)))
7256 invalidate (dest, VOIDmode);
7257 else if (GET_CODE (dest) == STRICT_LOW_PART
7258 || GET_CODE (dest) == ZERO_EXTRACT)
7259 invalidate (XEXP (dest, 0), GET_MODE (dest));
7260 }
7261
7262 /* Make sure registers mentioned in destinations
7263 are safe for use in an expression to be inserted.
7264 This removes from the hash table
7265 any invalid entry that refers to one of these registers.
7266
7267 We don't care about the return value from mention_regs because
7268 we are going to hash the SET_DEST values unconditionally. */
7269
7270 for (i = 0; i < n_sets; i++)
7271 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7272 mention_regs (SET_DEST (sets[i].rtl));
7273
7274 /* We may have just removed some of the src_elt's from the hash table.
7275 So replace each one with the current head of the same class. */
7276
7277 for (i = 0; i < n_sets; i++)
7278 if (sets[i].rtl)
7279 {
7280 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7281 /* If elt was removed, find current head of same class,
7282 or 0 if nothing remains of that class. */
7283 {
7284 register struct table_elt *elt = sets[i].src_elt;
7285
7286 while (elt && elt->prev_same_value)
7287 elt = elt->prev_same_value;
7288
7289 while (elt && elt->first_same_value == 0)
7290 elt = elt->next_same_value;
7291 sets[i].src_elt = elt ? elt->first_same_value : 0;
7292 }
7293 }
7294
7295 /* Now insert the destinations into their equivalence classes. */
7296
7297 for (i = 0; i < n_sets; i++)
7298 if (sets[i].rtl)
7299 {
7300 register rtx dest = SET_DEST (sets[i].rtl);
7301 register struct table_elt *elt;
7302
7303 /* Don't record value if we are not supposed to risk allocating
7304 floating-point values in registers that might be wider than
7305 memory. */
7306 if ((flag_float_store
7307 && GET_CODE (dest) == MEM
7308 && FLOAT_MODE_P (GET_MODE (dest)))
7309 /* Don't record values of destinations set inside a libcall block
7310 since we might delete the libcall. Things should have been set
7311 up so we won't want to reuse such a value, but we play it safe
7312 here. */
7313 || in_libcall_block
7314 /* If we didn't put a REG_EQUAL value or a source into the hash
7315 table, there is no point in recording DEST. */
7316 || sets[i].src_elt == 0
7317 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7318 or SIGN_EXTEND, don't record DEST since it can cause
7319 some tracking to be wrong.
7320
7321 ??? Think about this more later. */
7322 || (GET_CODE (dest) == SUBREG
7323 && (GET_MODE_SIZE (GET_MODE (dest))
7324 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7325 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7326 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7327 continue;
7328
7329 /* STRICT_LOW_PART isn't part of the value BEING set,
7330 and neither is the SUBREG inside it.
7331 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7332 if (GET_CODE (dest) == STRICT_LOW_PART)
7333 dest = SUBREG_REG (XEXP (dest, 0));
7334
7335 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7336 /* Registers must also be inserted into chains for quantities. */
7337 if (insert_regs (dest, sets[i].src_elt, 1))
7338 {
7339 /* If `insert_regs' changes something, the hash code must be
7340 recalculated. */
7341 rehash_using_reg (dest);
7342 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7343 }
7344
7345 elt = insert (dest, sets[i].src_elt,
7346 sets[i].dest_hash, GET_MODE (dest));
7347 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7348 && ! RTX_UNCHANGING_P (sets[i].inner_dest));
7349
7350 if (elt->in_memory)
7351 {
7352 /* This implicitly assumes a whole struct
7353 need not have MEM_IN_STRUCT_P.
7354 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7355 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7356 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7357 }
7358
7359 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7360 narrower than M2, and both M1 and M2 are the same number of words,
7361 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7362 make that equivalence as well.
7363
7364 However, some equivalence of BAR may yield a simpler result from
7365 gen_lowpart_if_possible than BAR itself does (e.g., if BAR was
7366 ZERO_EXTENDed from M2), so we scan all of BAR's equivalences.
7367 If we don't get a simplified form, make
7368 the SUBREG. It will not be used in an equivalence, but will
7369 cause two similar assignments to be detected.
7370
7371 Note the loop below will find SUBREG_REG (DEST) since we have
7372 already entered SRC and DEST of the SET in the table. */
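/* As a concrete illustration (modes and register numbers chosen
   arbitrarily): on a target with 32-bit words, SImode and HImode
   occupy the same number of words and SImode is no narrower than
   HImode, so

       (set (subreg:SI (reg:HI 100) 0) (reg:SI 101))

   also tells us

       (set (reg:HI 100) (subreg:HI (reg:SI 101) 0))

   and the code below records that equivalence.  */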
7373
7374 if (GET_CODE (dest) == SUBREG
7375 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7376 / UNITS_PER_WORD)
7377 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7378 && (GET_MODE_SIZE (GET_MODE (dest))
7379 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7380 && sets[i].src_elt != 0)
7381 {
7382 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7383 struct table_elt *elt, *classp = 0;
7384
7385 for (elt = sets[i].src_elt->first_same_value; elt;
7386 elt = elt->next_same_value)
7387 {
7388 rtx new_src = 0;
7389 unsigned src_hash;
7390 struct table_elt *src_elt;
7391
7392 /* Ignore invalid entries. */
7393 if (GET_CODE (elt->exp) != REG
7394 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7395 continue;
7396
7397 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7398 if (new_src == 0)
7399 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7400
7401 src_hash = HASH (new_src, new_mode);
7402 src_elt = lookup (new_src, src_hash, new_mode);
7403
7404 /* Put the new source in the hash table if it isn't
7405 there already. */
7406 if (src_elt == 0)
7407 {
7408 if (insert_regs (new_src, classp, 0))
7409 {
7410 rehash_using_reg (new_src);
7411 src_hash = HASH (new_src, new_mode);
7412 }
7413 src_elt = insert (new_src, classp, src_hash, new_mode);
7414 src_elt->in_memory = elt->in_memory;
7415 src_elt->in_struct = elt->in_struct;
7416 }
7417 else if (classp && classp != src_elt->first_same_value)
7418 /* Show that two things that we've seen before are
7419 actually the same. */
7420 merge_equiv_classes (src_elt, classp);
7421
7422 classp = src_elt->first_same_value;
7423 }
7424 }
7425 }
7426
7427 /* Special handling for (set REG0 REG1)
7428 where REG0 is the "cheapest", cheaper than REG1.
7429 After cse, REG1 will probably not be used in the sequel,
7430 so (if easily done) change this insn to (set REG1 REG0) and
7431 replace REG1 with REG0 in the previous insn that computed their value.
7432 Then REG1 will become a dead store and won't cloud the situation
7433 for later optimizations.
7434
7435 Do not make this change if REG1 is a hard register, because it will
7436 then be used in the sequel and we may be changing a two-operand insn
7437 into a three-operand insn.
7438
7439 Also do not do this if we are operating on a copy of INSN. */
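/* For instance (register numbers invented for illustration):

       (set (reg 101) (plus (reg 103) (reg 104)))
       (set (reg 100) (reg 101))

   becomes

       (set (reg 100) (plus (reg 103) (reg 104)))
       (set (reg 101) (reg 100))

   after which the second insn is a dead store whenever REG 101 is
   never used again.  */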
7440
7441 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7442 && NEXT_INSN (PREV_INSN (insn)) == insn
7443 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7444 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7445 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7446 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7447 == REGNO (SET_DEST (sets[0].rtl))))
7448 {
7449 rtx prev = PREV_INSN (insn);
7450 while (prev && GET_CODE (prev) == NOTE)
7451 prev = PREV_INSN (prev);
7452
7453 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7454 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7455 {
7456 rtx dest = SET_DEST (sets[0].rtl);
7457 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7458
7459 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7460 validate_change (insn, & SET_DEST (sets[0].rtl),
7461 SET_SRC (sets[0].rtl), 1);
7462 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7463 apply_change_group ();
7464
7465 /* If REG1 was equivalent to a constant, REG0 is not. */
7466 if (note)
7467 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7468
7469 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7470 any REG_WAS_0 note on INSN to PREV. */
7471 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7472 if (note)
7473 remove_note (prev, note);
7474
7475 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7476 if (note)
7477 {
7478 remove_note (insn, note);
7479 XEXP (note, 1) = REG_NOTES (prev);
7480 REG_NOTES (prev) = note;
7481 }
7482
7483 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7484 then we must delete it, because the value in REG0 has changed. */
7485 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7486 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7487 remove_note (insn, note);
7488 }
7489 }
7490
7491 /* If this is a conditional jump insn, record any known equivalences due to
7492 the condition being tested. */
7493
7494 last_jump_equiv_class = 0;
7495 if (GET_CODE (insn) == JUMP_INSN
7496 && n_sets == 1 && GET_CODE (x) == SET
7497 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7498 record_jump_equiv (insn, 0);
7499
7500 #ifdef HAVE_cc0
7501 /* If the previous insn set CC0 and this insn no longer references CC0,
7502 delete the previous insn. Here we use the fact that nothing expects CC0
7503 to be valid over an insn, which is true until the final pass. */
7504 if (prev_insn && GET_CODE (prev_insn) == INSN
7505 && (tem = single_set (prev_insn)) != 0
7506 && SET_DEST (tem) == cc0_rtx
7507 && ! reg_mentioned_p (cc0_rtx, x))
7508 {
7509 PUT_CODE (prev_insn, NOTE);
7510 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7511 NOTE_SOURCE_FILE (prev_insn) = 0;
7512 }
7513
7514 prev_insn_cc0 = this_insn_cc0;
7515 prev_insn_cc0_mode = this_insn_cc0_mode;
7516 #endif
7517
7518 prev_insn = insn;
7519 }
7520 \f
7521 /* Store 1 in *WRITES_PTR for those categories of memory ref
7522 that must be invalidated when the expression WRITTEN is stored in.
7523 If WRITTEN is null, say everything must be invalidated. */
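/* For example, a push such as (set (mem (pre_dec (reg sp))) ...)
   invalidates only stack-pointer-relative references, while a store
   in BLKmode (whose length we do not know) or through (mem (scratch))
   invalidates everything.  */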
7524
7525 static void
7526 note_mem_written (written, writes_ptr)
7527 rtx written;
7528 struct write_data *writes_ptr;
7529 {
7530 static struct write_data everything = {0, 1, 1, 1};
7531
7532 if (written == 0)
7533 *writes_ptr = everything;
7534 else if (GET_CODE (written) == MEM)
7535 {
7536 /* Pushing or popping the stack invalidates just the stack pointer. */
7537 rtx addr = XEXP (written, 0);
7538 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7539 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7540 && GET_CODE (XEXP (addr, 0)) == REG
7541 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7542 {
7543 writes_ptr->sp = 1;
7544 return;
7545 }
7546 else if (GET_MODE (written) == BLKmode)
7547 *writes_ptr = everything;
7548 /* (mem (scratch)) means clobber everything. */
7549 else if (GET_CODE (addr) == SCRATCH)
7550 *writes_ptr = everything;
7551 else if (cse_rtx_addr_varies_p (written))
7552 {
7553 /* A varying address that is a sum indicates an array element,
7554 and that's just as good as a structure element
7555 in implying that we need not invalidate scalar variables.
7556 However, we must allow QImode aliasing of scalars, because the
7557 ANSI C standard allows character pointers to alias anything. */
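	  /* E.g., given "int x; char *p = (char *) &x;", the QImode store
	     "*p = 0" really does modify the scalar X, so it must not be
	     treated as affecting nonscalars only.  */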
7558 if (! ((MEM_IN_STRUCT_P (written)
7559 || GET_CODE (XEXP (written, 0)) == PLUS)
7560 && GET_MODE (written) != QImode))
7561 writes_ptr->all = 1;
7562 writes_ptr->nonscalar = 1;
7563 }
7564 writes_ptr->var = 1;
7565 }
7566 }
7567
7568 /* Perform invalidation on the basis of everything about an insn
7569 except for invalidating the actual places that are SET in it.
7570 This includes the places CLOBBERed, and anything that might
7571 alias with something that is SET or CLOBBERed.
7572
7573 W points to the writes_memory for this insn, a struct write_data
7574 saying which kinds of memory references must be invalidated.
7575 X is the pattern of the insn. */
7576
7577 static void
7578 invalidate_from_clobbers (w, x)
7579 struct write_data *w;
7580 rtx x;
7581 {
7582 /* If W->var is not set, W specifies no action.
7583 If W->all is set, this step gets all memory refs
7584 so they can be ignored in the rest of this function. */
7585 if (w->var)
7586 invalidate_memory (w);
7587
7588 if (w->sp)
7589 {
7590 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7591 reg_tick[STACK_POINTER_REGNUM]++;
7592
7593 /* This should be *very* rare. */
7594 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7595 invalidate (stack_pointer_rtx, VOIDmode);
7596 }
7597
7598 if (GET_CODE (x) == CLOBBER)
7599 {
7600 rtx ref = XEXP (x, 0);
7601 if (ref)
7602 {
7603 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7604 || (GET_CODE (ref) == MEM && ! w->all))
7605 invalidate (ref, VOIDmode);
7606 else if (GET_CODE (ref) == STRICT_LOW_PART
7607 || GET_CODE (ref) == ZERO_EXTRACT)
7608 invalidate (XEXP (ref, 0), GET_MODE (ref));
7609 }
7610 }
7611 else if (GET_CODE (x) == PARALLEL)
7612 {
7613 register int i;
7614 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7615 {
7616 register rtx y = XVECEXP (x, 0, i);
7617 if (GET_CODE (y) == CLOBBER)
7618 {
7619 rtx ref = XEXP (y, 0);
7620 if (ref)
7621 {
7622 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7623 || (GET_CODE (ref) == MEM && !w->all))
7624 invalidate (ref, VOIDmode);
7625 else if (GET_CODE (ref) == STRICT_LOW_PART
7626 || GET_CODE (ref) == ZERO_EXTRACT)
7627 invalidate (XEXP (ref, 0), GET_MODE (ref));
7628 }
7629 }
7630 }
7631 }
7632 }
7633 \f
7634 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7635 and replace any registers in them with either an equivalent constant
7636 or the canonical form of the register. If we are inside an address,
7637 only do this if the address remains valid.
7638
7639 OBJECT is 0 except when within a MEM in which case it is the MEM.
7640
7641 Return the replacement for X. */
7642
7643 static rtx
7644 cse_process_notes (x, object)
7645 rtx x;
7646 rtx object;
7647 {
7648 enum rtx_code code = GET_CODE (x);
7649 char *fmt = GET_RTX_FORMAT (code);
7650 int i;
7651
7652 switch (code)
7653 {
7654 case CONST_INT:
7655 case CONST:
7656 case SYMBOL_REF:
7657 case LABEL_REF:
7658 case CONST_DOUBLE:
7659 case PC:
7660 case CC0:
7661 case LO_SUM:
7662 return x;
7663
7664 case MEM:
7665 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7666 return x;
7667
7668 case EXPR_LIST:
7669 case INSN_LIST:
7670 if (REG_NOTE_KIND (x) == REG_EQUAL)
7671 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7672 if (XEXP (x, 1))
7673 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7674 return x;
7675
7676 case SIGN_EXTEND:
7677 case ZERO_EXTEND:
7678 {
7679 rtx new = cse_process_notes (XEXP (x, 0), object);
7680 /* We don't substitute VOIDmode constants into these rtx,
7681 since they would impede folding. */
7682 if (GET_MODE (new) != VOIDmode)
7683 validate_change (object, &XEXP (x, 0), new, 0);
7684 return x;
7685 }
7686
7687 case REG:
7688 i = reg_qty[REGNO (x)];
7689
7690 /* Return a constant or a constant register. */
7691 if (REGNO_QTY_VALID_P (REGNO (x))
7692 && qty_const[i] != 0
7693 && (CONSTANT_P (qty_const[i])
7694 || GET_CODE (qty_const[i]) == REG))
7695 {
7696 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7697 if (new)
7698 return new;
7699 }
7700
7701 /* Otherwise, canonicalize this register. */
7702 return canon_reg (x, NULL_RTX);
7703 }
7704
7705 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7706 if (fmt[i] == 'e')
7707 validate_change (object, &XEXP (x, i),
7708 cse_process_notes (XEXP (x, i), object), 0);
7709
7710 return x;
7711 }
7712 \f
7713 /* Find common subexpressions between the end test of a loop and the beginning
7714 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7715
7716 Often we have a loop where an expression in the exit test is used
7717 in the body of the loop. For example "while (*p) *q++ = *p++;".
7718 Because of the way we duplicate the loop exit test in front of the loop,
7719 however, we don't detect that common subexpression. This will be caught
7720 when global cse is implemented, but this is a quite common case.
7721
7722 This function handles the most common cases of these common expressions.
7723 It is called after we have processed the basic block that ends with the
7724 NOTE_INSN_LOOP_END note terminating a loop, when the preceding JUMP_INSN
7725 jumps to a label that is used only once. */
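/* In the example above, the duplicated exit test means "*p" is loaded
   once just before the loop and again inside the body;
   cse_set_around_loop, below, does the actual work of reusing the
   value computed before the loop.  */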
7726
7727 static void
7728 cse_around_loop (loop_start)
7729 rtx loop_start;
7730 {
7731 rtx insn;
7732 int i;
7733 struct table_elt *p;
7734
7735 /* If the jump at the end of the loop doesn't go to the start, we don't
7736 do anything. */
7737 for (insn = PREV_INSN (loop_start);
7738 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7739 insn = PREV_INSN (insn))
7740 ;
7741
7742 if (insn == 0
7743 || GET_CODE (insn) != NOTE
7744 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7745 return;
7746
7747 /* If the last insn of the loop (the end test) was an NE comparison,
7748 we will interpret it as an EQ comparison, since we fell through
7749 the loop. Any equivalences resulting from that comparison are
7750 therefore not valid and must be invalidated. */
7751 if (last_jump_equiv_class)
7752 for (p = last_jump_equiv_class->first_same_value; p;
7753 p = p->next_same_value)
7754 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7755 || (GET_CODE (p->exp) == SUBREG
7756 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7757 invalidate (p->exp, VOIDmode);
7758 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7759 || GET_CODE (p->exp) == ZERO_EXTRACT)
7760 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7761
7762 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7763 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7764
7765 The only thing we do with SET_DEST is invalidate entries, so we
7766 can safely process each SET in order. It is slightly less efficient
7767 to do so, but we only want to handle the most common cases. */
7768
7769 for (insn = NEXT_INSN (loop_start);
7770 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7771 && ! (GET_CODE (insn) == NOTE
7772 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7773 insn = NEXT_INSN (insn))
7774 {
7775 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7776 && (GET_CODE (PATTERN (insn)) == SET
7777 || GET_CODE (PATTERN (insn)) == CLOBBER))
7778 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7779 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7780 && GET_CODE (PATTERN (insn)) == PARALLEL)
7781 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7782 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7783 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7784 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7785 loop_start);
7786 }
7787 }
7788 \f
7789 /* Variable used for communications between the next two routines. */
7790
7791 static struct write_data skipped_writes_memory;
7792
7793 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7794 since they are done elsewhere. This function is called via note_stores. */
7795
7796 static void
7797 invalidate_skipped_set (dest, set)
7798 rtx dest;
7799 rtx set;
7800 {
7801 if (GET_CODE (set) == CLOBBER
7802 #ifdef HAVE_cc0
7803 || dest == cc0_rtx
7804 #endif
7805 || dest == pc_rtx)
7806 return;
7807
7808 if (GET_CODE (dest) == MEM)
7809 note_mem_written (dest, &skipped_writes_memory);
7810
7811 /* There are times when an address can appear varying and be a PLUS
7812 during this scan when it would be a fixed address were we to know
7813 the proper equivalences. So promote "nonscalar" to be "all". */
7814 if (skipped_writes_memory.nonscalar)
7815 skipped_writes_memory.all = 1;
7816
7817 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7818 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7819 invalidate (dest, VOIDmode);
7820 else if (GET_CODE (dest) == STRICT_LOW_PART
7821 || GET_CODE (dest) == ZERO_EXTRACT)
7822 invalidate (XEXP (dest, 0), GET_MODE (dest));
7823 }
7824
7825 /* Invalidate all insns from START up to the end of the function or the
7826 next label. This is called when we wish to CSE around a block that is
7827 conditionally executed. */
7828
7829 static void
7830 invalidate_skipped_block (start)
7831 rtx start;
7832 {
7833 rtx insn;
7834 static struct write_data init = {0, 0, 0, 0};
7835 static struct write_data everything = {0, 1, 1, 1};
7836
7837 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7838 insn = NEXT_INSN (insn))
7839 {
7840 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7841 continue;
7842
7843 skipped_writes_memory = init;
7844
7845 if (GET_CODE (insn) == CALL_INSN)
7846 {
7847 invalidate_for_call ();
7848 skipped_writes_memory = everything;
7849 }
7850
7851 note_stores (PATTERN (insn), invalidate_skipped_set);
7852 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7853 }
7854 }
7855 \f
7856 /* Used for communication between the following two routines; contains a
7857 value to be checked for modification. */
7858
7859 static rtx cse_check_loop_start_value;
7860
7861 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7862 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
7863
7864 static void
7865 cse_check_loop_start (x, set)
7866 rtx x;
7867 rtx set;
7868 {
7869 if (cse_check_loop_start_value == 0
7870 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
7871 return;
7872
7873 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7874 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7875 cse_check_loop_start_value = 0;
7876 }
7877
7878 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7879 a loop that starts with the label at LOOP_START.
7880
7881 If X is a SET, we see if its SET_SRC is currently in our hash table.
7882 If so, we see if it has a value equal to some register used only in the
7883 loop exit code (as marked by jump.c).
7884
7885 If those two conditions are true, we search backwards from the start of
7886 the loop to see if that same value was loaded into a register that still
7887 retains its value at the start of the loop.
7888
7889 If so, we insert an insn after the load to copy the destination of that
7890 load into the equivalent register and (try to) replace our SET_SRC with that
7891 register.
7892
7893 In any event, we invalidate whatever this SET or CLOBBER modifies. */
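/* As an illustrative example (register numbers invented): suppose the
   exit test left (reg 99) holding an expression EXP used only in the
   exit code, and an insn just before the loop computed the same EXP
   into (reg 50).  We then emit (set (reg 99) (reg 50)) after that
   insn and replace our SET_SRC, which is EXP, with (reg 99).  */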
7894
7895 static void
7896 cse_set_around_loop (x, insn, loop_start)
7897 rtx x;
7898 rtx insn;
7899 rtx loop_start;
7900 {
7901 struct table_elt *src_elt;
7902 static struct write_data init = {0, 0, 0, 0};
7903 struct write_data writes_memory;
7904
7905 writes_memory = init;
7906
7907 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7908 are setting PC or CC0 or whose SET_SRC is already a register. */
7909 if (GET_CODE (x) == SET
7910 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
7911 && GET_CODE (SET_SRC (x)) != REG)
7912 {
7913 src_elt = lookup (SET_SRC (x),
7914 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
7915 GET_MODE (SET_DEST (x)));
7916
7917 if (src_elt)
7918 for (src_elt = src_elt->first_same_value; src_elt;
7919 src_elt = src_elt->next_same_value)
7920 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
7921 && COST (src_elt->exp) < COST (SET_SRC (x)))
7922 {
7923 rtx p, set;
7924
7925 /* Look for an insn in front of LOOP_START that sets
7926 something in the desired mode to SET_SRC (x) before we hit
7927 a label or CALL_INSN. */
7928
7929 for (p = prev_nonnote_insn (loop_start);
7930 p && GET_CODE (p) != CALL_INSN
7931 && GET_CODE (p) != CODE_LABEL;
7932 p = prev_nonnote_insn (p))
7933 if ((set = single_set (p)) != 0
7934 && GET_CODE (SET_DEST (set)) == REG
7935 && GET_MODE (SET_DEST (set)) == src_elt->mode
7936 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
7937 {
7938 /* We now have to ensure that nothing between P
7939 and LOOP_START modified anything referenced in
7940 SET_SRC (x). We know that nothing within the loop
7941 can modify it, or we would have invalidated it in
7942 the hash table. */
7943 rtx q;
7944
7945 cse_check_loop_start_value = SET_SRC (x);
7946 for (q = p; q != loop_start; q = NEXT_INSN (q))
7947 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
7948 note_stores (PATTERN (q), cse_check_loop_start);
7949
7950 /* If nothing was changed and we can replace our
7951 SET_SRC, add an insn after P to copy its destination
7952 to what we will be replacing SET_SRC with. */
7953 if (cse_check_loop_start_value
7954 && validate_change (insn, &SET_SRC (x),
7955 src_elt->exp, 0))
7956 emit_insn_after (gen_move_insn (src_elt->exp,
7957 SET_DEST (set)),
7958 p);
7959 break;
7960 }
7961 }
7962 }
7963
7964 /* Now invalidate anything modified by X. */
7965 note_mem_written (SET_DEST (x), &writes_memory);
7966
7967 if (writes_memory.var)
7968 invalidate_memory (&writes_memory);
7969
7970 /* See comment on similar code in cse_insn for explanation of these tests. */
7971 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
7972 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
7973 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
7974 invalidate (SET_DEST (x), VOIDmode);
7975 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7976 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
7977 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
7978 }
7979 \f
7980 /* Find the end of INSN's basic block and return its range,
7981 the total number of SETs in all the insns of the block, the last insn of the
7982 block, and the branch path.
7983
7984 The branch path indicates which branches should be followed. If a non-zero
7985 path size is specified, the block should be rescanned and a different set
7986 of branches will be taken. The branch path is only used if
7987 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
7988
7989 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
7990 used to describe the block. It is filled in with the information about
7991 the current block. The incoming structure's branch path, if any, is used
7992 to construct the output branch path. */
7993
7994 void
7995 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
7996 rtx insn;
7997 struct cse_basic_block_data *data;
7998 int follow_jumps;
7999 int after_loop;
8000 int skip_blocks;
8001 {
8002 rtx p = insn, q;
8003 int nsets = 0;
8004 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8005 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8006 int path_size = data->path_size;
8007 int path_entry = 0;
8008 int i;
8009
8010 /* Update the previous branch path, if any. If the last branch was
8011 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8012 shorten the path by one and look at the previous branch. We know that
8013 at least one branch must have been taken if PATH_SIZE is non-zero. */
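  /* The effect is like decrementing a binary counter: assuming no new
     branches are discovered on the rescans, a two-branch path is
     revisited as (TAKEN, TAKEN), then (TAKEN, NOT_TAKEN), then
     (NOT_TAKEN), after which the path becomes empty and no further
     rescan is done.  */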
8014 while (path_size > 0)
8015 {
8016 if (data->path[path_size - 1].status != NOT_TAKEN)
8017 {
8018 data->path[path_size - 1].status = NOT_TAKEN;
8019 break;
8020 }
8021 else
8022 path_size--;
8023 }
8024
8025 /* Scan to end of this basic block. */
8026 while (p && GET_CODE (p) != CODE_LABEL)
8027 {
8028 /* Don't cse out the end of a loop. This makes a difference
8029 only for the unusual loops that always execute at least once;
8030 all other loops have labels there so we will stop in any case.
8031 Cse'ing out the end of the loop is dangerous because it
8032 might cause an invariant expression inside the loop
8033 to be reused after the end of the loop. This would make it
8034 hard to move the expression out of the loop in loop.c,
8035 especially if it is one of several equivalent expressions
8036 and loop.c would like to eliminate it.
8037
8038 If we are running after loop.c has finished, we can ignore
8039 the NOTE_INSN_LOOP_END. */
8040
8041 if (! after_loop && GET_CODE (p) == NOTE
8042 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8043 break;
8044
8045 /* Don't cse over a call to setjmp; on some machines (e.g., VAX)
8046 the regs restored by the longjmp come from
8047 a later time than the setjmp. */
8048 if (GET_CODE (p) == NOTE
8049 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8050 break;
8051
8052 /* A PARALLEL can have lots of SETs in it,
8053 especially if it is really an ASM_OPERANDS. */
8054 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8055 && GET_CODE (PATTERN (p)) == PARALLEL)
8056 nsets += XVECLEN (PATTERN (p), 0);
8057 else if (GET_CODE (p) != NOTE)
8058 nsets += 1;
8059
8060 /* Ignore insns made by CSE; they cannot affect the boundaries of
8061 the basic block. */
8062
8063 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8064 high_cuid = INSN_CUID (p);
8065 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8066 low_cuid = INSN_CUID (p);
8067
8068 /* See if this insn is in our branch path. If it is and we are to
8069 take it, do so. */
8070 if (path_entry < path_size && data->path[path_entry].branch == p)
8071 {
8072 if (data->path[path_entry].status != NOT_TAKEN)
8073 p = JUMP_LABEL (p);
8074
8075 /* Point to next entry in path, if any. */
8076 path_entry++;
8077 }
8078
8079 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8080 was specified, we haven't reached our maximum path length, there are
8081 insns following the target of the jump, this is the only use of the
8082 jump label, and the target label is preceded by a BARRIER.
8083
8084 Alternatively, we can follow the jump if it branches around a
8085 block of code and there are no other branches into the block.
8086 In this case invalidate_skipped_block will be called to invalidate any
8087 registers set in the block when following the jump. */
8088
8089 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8090 && GET_CODE (p) == JUMP_INSN
8091 && GET_CODE (PATTERN (p)) == SET
8092 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8093 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8094 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8095 {
8096 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8097 if ((GET_CODE (q) != NOTE
8098 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8099 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8100 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8101 break;
8102
8103 /* If we ran into a BARRIER, this code is an extension of the
8104 basic block when the branch is taken. */
8105 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8106 {
8107 /* Don't allow ourselves to keep walking around an
8108 always-executed loop. */
8109 if (next_real_insn (q) == next)
8110 {
8111 p = NEXT_INSN (p);
8112 continue;
8113 }
8114
8115 /* Similarly, don't put a branch in our path more than once. */
8116 for (i = 0; i < path_entry; i++)
8117 if (data->path[i].branch == p)
8118 break;
8119
8120 if (i != path_entry)
8121 break;
8122
8123 data->path[path_entry].branch = p;
8124 data->path[path_entry++].status = TAKEN;
8125
8126 /* This branch now ends our path. It was possible that we
8127 didn't see this branch the last time around (when the
8128 insn in front of the target was a JUMP_INSN that was
8129 turned into a no-op). */
8130 path_size = path_entry;
8131
8132 p = JUMP_LABEL (p);
8133 /* Mark block so we won't scan it again later. */
8134 PUT_MODE (NEXT_INSN (p), QImode);
8135 }
8136 /* Detect a branch around a block of code. */
8137 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8138 {
8139 register rtx tmp;
8140
8141 if (next_real_insn (q) == next)
8142 {
8143 p = NEXT_INSN (p);
8144 continue;
8145 }
8146
8147 for (i = 0; i < path_entry; i++)
8148 if (data->path[i].branch == p)
8149 break;
8150
8151 if (i != path_entry)
8152 break;
8153
8154 /* This is no_labels_between_p (p, q) with an added check for
8155 reaching the end of a function (in case Q precedes P). */
8156 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8157 if (GET_CODE (tmp) == CODE_LABEL)
8158 break;
8159
8160 if (tmp == q)
8161 {
8162 data->path[path_entry].branch = p;
8163 data->path[path_entry++].status = AROUND;
8164
8165 path_size = path_entry;
8166
8167 p = JUMP_LABEL (p);
8168 /* Mark block so we won't scan it again later. */
8169 PUT_MODE (NEXT_INSN (p), QImode);
8170 }
8171 }
8172 }
8173 p = NEXT_INSN (p);
8174 }
8175
8176 data->low_cuid = low_cuid;
8177 data->high_cuid = high_cuid;
8178 data->nsets = nsets;
8179 data->last = p;
8180
8181 /* If none of the jumps in the path were taken, set our path length to zero
8182 so a rescan won't be done. */
8183 for (i = path_size - 1; i >= 0; i--)
8184 if (data->path[i].status != NOT_TAKEN)
8185 break;
8186
8187 if (i == -1)
8188 data->path_size = 0;
8189 else
8190 data->path_size = path_size;
8191
8192 /* End the current branch path. */
8193 data->path[path_size].branch = 0;
8194 }
8195 \f
8196 /* Perform cse on the instructions of a function.
8197 F is the first instruction.
8198 NREGS is one plus the highest pseudo-reg number used in the function.
8199
8200 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8201 (only if -frerun-cse-after-loop).
8202
8203 Returns 1 if jump_optimize should be redone due to simplifications
8204 in conditional jump instructions. */
8205
8206 int
8207 cse_main (f, nregs, after_loop, file)
8208 rtx f;
8209 int nregs;
8210 int after_loop;
8211 FILE *file;
8212 {
8213 struct cse_basic_block_data val;
8214 register rtx insn = f;
8215 register int i;
8216
8217 cse_jumps_altered = 0;
8218 constant_pool_entries_cost = 0;
8219 val.path_size = 0;
8220
8221 init_recog ();
8222
8223 max_reg = nregs;
8224
8225 all_minus_one = (int *) alloca (nregs * sizeof (int));
8226 consec_ints = (int *) alloca (nregs * sizeof (int));
8227
8228 for (i = 0; i < nregs; i++)
8229 {
8230 all_minus_one[i] = -1;
8231 consec_ints[i] = i;
8232 }
8233
8234 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8235 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8236 reg_qty = (int *) alloca (nregs * sizeof (int));
8237 reg_in_table = (int *) alloca (nregs * sizeof (int));
8238 reg_tick = (int *) alloca (nregs * sizeof (int));
8239
8240 #ifdef LOAD_EXTEND_OP
8241
8242 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8243 and change the code and mode as appropriate. */
8244 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
8245 #endif
8246
8247 /* Discard all the free elements of the previous function
8248 since they are allocated in the temporary obstack. */
8249 bzero ((char *) table, sizeof table);
8250 free_element_chain = 0;
8251 n_elements_made = 0;
8252
8253 /* Find the largest uid. */
8254
8255 max_uid = get_max_uid ();
8256 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8257 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8258
8259 /* Compute the mapping from uids to cuids.
8260 CUIDs are numbers assigned to insns, like uids,
8261 except that cuids increase monotonically through the code.
8262 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8263 between two insns is not affected by -g. */
8264
8265 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8266 {
8267 if (GET_CODE (insn) != NOTE
8268 || NOTE_LINE_NUMBER (insn) < 0)
8269 INSN_CUID (insn) = ++i;
8270 else
8271 /* Give a line number note the same cuid as preceding insn. */
8272 INSN_CUID (insn) = i;
8273 }
8274
8275 /* Initialize which registers are clobbered by calls. */
8276
8277 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8278
8279 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8280 if ((call_used_regs[i]
8281 /* Used to check !fixed_regs[i] here, but that isn't safe;
8282 fixed regs are still call-clobbered, and sched can get
8283 confused if they can "live across calls".
8284
8285 The frame pointer is always preserved across calls. The arg
8286 pointer is if it is fixed. The stack pointer usually is, unless
8287 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8288 will be present. If we are generating PIC code, the PIC offset
8289 table register is preserved across calls. */
8290
8291 && i != STACK_POINTER_REGNUM
8292 && i != FRAME_POINTER_REGNUM
8293 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8294 && i != HARD_FRAME_POINTER_REGNUM
8295 #endif
8296 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8297 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8298 #endif
8299 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8300 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8301 #endif
8302 )
8303 || global_regs[i])
8304 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8305
8306 /* Loop over basic blocks.
8307 Compute the maximum number of qty's needed for each basic block
8308 (which is 2 for each SET). */
8309 insn = f;
8310 while (insn)
8311 {
8312 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8313 flag_cse_skip_blocks);
8314
8315 /* If this basic block was already processed or has no sets, skip it. */
8316 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8317 {
8318 PUT_MODE (insn, VOIDmode);
8319 insn = (val.last ? NEXT_INSN (val.last) : 0);
8320 val.path_size = 0;
8321 continue;
8322 }
8323
8324 cse_basic_block_start = val.low_cuid;
8325 cse_basic_block_end = val.high_cuid;
8326 max_qty = val.nsets * 2;
8327
8328 if (file)
8329 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8330 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8331 val.nsets);
8332
8333 /* Make MAX_QTY bigger to give us room to optimize
8334 past the end of this basic block, if that should prove useful. */
8335 if (max_qty < 500)
8336 max_qty = 500;
8337
8338 max_qty += max_reg;
8339
8340 /* If this basic block is being extended by following certain jumps
8341 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8342 Otherwise, we start after this basic block. */
8343 if (val.path_size > 0)
8344 cse_basic_block (insn, val.last, val.path, 0);
8345 else
8346 {
8347 int old_cse_jumps_altered = cse_jumps_altered;
8348 rtx temp;
8349
8350 /* When cse changes a conditional jump to an unconditional
8351 jump, we want to reprocess the block, since it will give
8352 us a new branch path to investigate. */
8353 cse_jumps_altered = 0;
8354 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8355 if (cse_jumps_altered == 0
8356 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8357 insn = temp;
8358
8359 cse_jumps_altered |= old_cse_jumps_altered;
8360 }
8361
8362 #ifdef USE_C_ALLOCA
8363 alloca (0);
8364 #endif
8365 }
8366
8367 /* Tell refers_to_mem_p that qty_const info is not available. */
8368 qty_const = 0;
8369
8370 if (max_elements_made < n_elements_made)
8371 max_elements_made = n_elements_made;
8372
8373 return cse_jumps_altered;
8374 }
8375
8376 /* Process a single basic block. FROM and TO are the limits of the basic
8377 block. NEXT_BRANCH points to the branch path when following jumps or
8378 a null path when not following jumps.
8379
8380 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8381 loop. This is true when we are being called for the last time on a
8382 block and this CSE pass is before loop.c. */
8383
8384 static rtx
8385 cse_basic_block (from, to, next_branch, around_loop)
8386 register rtx from, to;
8387 struct branch_path *next_branch;
8388 int around_loop;
8389 {
8390 register rtx insn;
8391 int to_usage = 0;
8392 int in_libcall_block = 0;
8393
8394 /* Each of these arrays is undefined before max_reg, so only allocate
8395 the space actually needed and adjust the start below. */
8396
8397 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8398 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8399 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8400 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8401 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8402 qty_comparison_code
8403 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8404 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8405 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8406
8407 qty_first_reg -= max_reg;
8408 qty_last_reg -= max_reg;
8409 qty_mode -= max_reg;
8410 qty_const -= max_reg;
8411 qty_const_insn -= max_reg;
8412 qty_comparison_code -= max_reg;
8413 qty_comparison_qty -= max_reg;
8414 qty_comparison_const -= max_reg;
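  /* After these adjustments, e.g. qty_first_reg[max_reg] addresses
     element 0 of the block just allocated; as noted above, indices
     below max_reg must never be used.  */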
8415
8416 new_basic_block ();
8417
8418 /* TO might be a label. If so, protect it from being deleted. */
8419 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8420 ++LABEL_NUSES (to);
8421
8422 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8423 {
8424 register enum rtx_code code;
8425
8426 /* See if this is a branch that is part of the path. If so, and it is
8427 to be taken, do so. */
8428 if (next_branch->branch == insn)
8429 {
8430 enum taken status = next_branch++->status;
8431 if (status != NOT_TAKEN)
8432 {
8433 if (status == TAKEN)
8434 record_jump_equiv (insn, 1);
8435 else
8436 invalidate_skipped_block (NEXT_INSN (insn));
8437
8438 /* Set the last insn as the jump insn; it doesn't affect cc0.
8439 Then follow this branch. */
8440 #ifdef HAVE_cc0
8441 prev_insn_cc0 = 0;
8442 #endif
8443 prev_insn = insn;
8444 insn = JUMP_LABEL (insn);
8445 continue;
8446 }
8447 }
8448
8449 code = GET_CODE (insn);
8450 if (GET_MODE (insn) == QImode)
8451 PUT_MODE (insn, VOIDmode);
8452
8453 if (GET_RTX_CLASS (code) == 'i')
8454 {
8455 /* Process notes first so we have all notes in canonical forms when
8456 looking for duplicate operations. */
8457
8458 if (REG_NOTES (insn))
8459 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8460
8461 /* Track when we are inside a LIBCALL block. Inside such a block,
8462 we do not want to record destinations. The last insn of a
8463 LIBCALL block is not considered to be part of the block, since
8464 its destination is the result of the block and hence should be
8465 recorded. */
8466
8467 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8468 in_libcall_block = 1;
8469 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8470 in_libcall_block = 0;
8471
8472 cse_insn (insn, in_libcall_block);
8473 }
8474
8475 /* If INSN is now an unconditional jump, skip to the end of our
8476 basic block by pretending that we just did the last insn in the
8477 basic block. If we are jumping to the end of our block, show
8478 that we can have one usage of TO. */
8479
8480 if (simplejump_p (insn))
8481 {
8482 if (to == 0)
8483 return 0;
8484
8485 if (JUMP_LABEL (insn) == to)
8486 to_usage = 1;
8487
8488 /* Maybe TO was deleted because the jump is unconditional.
8489 If so, there is nothing left in this basic block. */
8490 /* ??? Perhaps it would be smarter to set TO
8491 to whatever follows this insn,
8492 and pretend the basic block had always ended here. */
8493 if (INSN_DELETED_P (to))
8494 break;
8495
8496 insn = PREV_INSN (to);
8497 }
8498
8499 /* See if it is ok to keep on going past the label
8500 which used to end our basic block. Remember that we incremented
8501 the count of that label, so we decrement it here. If we made
8502 a jump unconditional, TO_USAGE will be one; in that case, we don't
8503 want to count the use in that jump. */
8504
8505 if (to != 0 && NEXT_INSN (insn) == to
8506 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8507 {
8508 struct cse_basic_block_data val;
8509 rtx prev;
8510
8511 insn = NEXT_INSN (to);
8512
8513 if (LABEL_NUSES (to) == 0)
8514 insn = delete_insn (to);
8515
8516 /* If TO was the last insn in the function, we are done. */
8517 if (insn == 0)
8518 return 0;
8519
8520 /* If TO was preceded by a BARRIER we are done with this block
8521 because it has no continuation. */
8522 prev = prev_nonnote_insn (to);
8523 if (prev && GET_CODE (prev) == BARRIER)
8524 return insn;
8525
8526 /* Find the end of the following block. Note that we won't be
8527 following branches in this case. */
8528 to_usage = 0;
8529 val.path_size = 0;
8530 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8531
8532 /* If the tables we allocated have enough space left
8533 to handle all the SETs in the next basic block,
8534 continue through it. Otherwise, return,
8535 and that block will be scanned individually. */
8536 if (val.nsets * 2 + next_qty > max_qty)
8537 break;
8538
8539 cse_basic_block_start = val.low_cuid;
8540 cse_basic_block_end = val.high_cuid;
8541 to = val.last;
8542
8543 /* Prevent TO from being deleted if it is a label. */
8544 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8545 ++LABEL_NUSES (to);
8546
8547 /* Back up so we process the first insn in the extension. */
8548 insn = PREV_INSN (insn);
8549 }
8550 }
8551
8552 if (next_qty > max_qty)
8553 abort ();
8554
8555 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8556 the previous insn is the only insn that branches to the head of a loop,
8557 we can cse into the loop. Don't do this if we changed the jump
8558 structure of a loop unless we aren't going to be following jumps. */
8559
8560 if ((cse_jumps_altered == 0
8561 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8562 && around_loop && to != 0
8563 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8564 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8565 && JUMP_LABEL (PREV_INSN (to)) != 0
8566 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8567 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8568
8569 return to ? NEXT_INSN (to) : 0;
8570 }
8571 \f
8572 /* Count the number of times registers are used (not set) in X.
8573 COUNTS is an array in which we accumulate the count, INCR is how much
8574 we count each register usage.
8575
8576 Don't count a usage of DEST, which is the SET_DEST of a SET which
8577 contains X in its SET_SRC. This is because such a SET does not
8578 modify the liveness of DEST. */
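/* For example, in (set (reg 100) (plus (reg 100) (const_int 1))),
   the use of (reg 100) inside the source is not counted, so the insn
   can still be recognized as dead when nothing else uses (reg 100).  */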
8579
8580 static void
8581 count_reg_usage (x, counts, dest, incr)
8582 rtx x;
8583 int *counts;
8584 rtx dest;
8585 int incr;
8586 {
8587 enum rtx_code code;
8588 char *fmt;
8589 int i, j;
8590
8591 if (x == 0)
8592 return;
8593
8594 switch (code = GET_CODE (x))
8595 {
8596 case REG:
8597 if (x != dest)
8598 counts[REGNO (x)] += incr;
8599 return;
8600
8601 case PC:
8602 case CC0:
8603 case CONST:
8604 case CONST_INT:
8605 case CONST_DOUBLE:
8606 case SYMBOL_REF:
8607 case LABEL_REF:
8608 case CLOBBER:
8609 return;
8610
8611 case SET:
8612 /* Unless we are setting a REG, count everything in SET_DEST. */
8613 if (GET_CODE (SET_DEST (x)) != REG)
8614 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8615
8616 /* If SRC has side-effects, then we can't delete this insn, so the
8617 usage of SET_DEST inside SRC counts.
8618
8619 ??? Strictly-speaking, we might be preserving this insn
8620 because some other SET has side-effects, but that's hard
8621 to do and can't happen now. */
8622 count_reg_usage (SET_SRC (x), counts,
8623 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
8624 incr);
8625 return;
8626
8627 case CALL_INSN:
8628 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
8629
8630 /* ... falls through ... */
8631 case INSN:
8632 case JUMP_INSN:
8633 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
8634
8635 /* Things used in a REG_EQUAL note aren't dead since loop may try to
8636 use them. */
8637
8638 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
8639 return;
8640
8641 case EXPR_LIST:
8642 case INSN_LIST:
8643 if (REG_NOTE_KIND (x) == REG_EQUAL
8644 || GET_CODE (XEXP (x, 0)) == USE)
8645 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
8646 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
8647 return;
8648 }
8649
8650 fmt = GET_RTX_FORMAT (code);
8651 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8652 {
8653 if (fmt[i] == 'e')
8654 count_reg_usage (XEXP (x, i), counts, dest, incr);
8655 else if (fmt[i] == 'E')
8656 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8657 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
8658 }
8659 }
8660 \f
8661 /* Scan all the insns and delete any that are dead; i.e., they store a register
8662 that is never used or they copy a register to itself.
8663
8664 This is used to remove insns made obviously dead by cse. It improves the
8665 heuristics in loop since it won't try to move dead invariants out of loops
8666 or make givs for dead quantities. The remaining passes of the compilation
8667 are also sped up. */
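/* For example, the (set REG1 REG0) copy that cse_insn creates when it
   swaps a register-register copy (see the comment there) is deleted
   here if REG1 is never used afterward.  */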
8668
8669 void
8670 delete_dead_from_cse (insns, nreg)
8671 rtx insns;
8672 int nreg;
8673 {
8674 int *counts = (int *) alloca (nreg * sizeof (int));
8675 rtx insn, prev;
8676 rtx tem;
8677 int i;
8678 int in_libcall = 0;
8679
8680 /* First count the number of times each register is used. */
8681 bzero ((char *) counts, sizeof (int) * nreg);
8682 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8683 count_reg_usage (insn, counts, NULL_RTX, 1);
8684
8685 /* Go from the last insn to the first and delete insns that only set unused
8686 registers or copy a register to itself. As we delete an insn, remove
8687 usage counts for registers it uses. */
8688 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8689 {
8690 int live_insn = 0;
8691
8692 prev = prev_real_insn (insn);
8693
8694 /* Don't delete any insns that are part of a libcall block.
8695 Flow or loop might get confused if we did that. Remember
8696 that we are scanning backwards. */
8697 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8698 in_libcall = 1;
8699
8700 if (in_libcall)
8701 live_insn = 1;
8702 else if (GET_CODE (PATTERN (insn)) == SET)
8703 {
8704 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8705 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8706 ;
8707
8708 #ifdef HAVE_cc0
8709 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8710 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8711 && ((tem = next_nonnote_insn (insn)) == 0
8712 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8713 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8714 ;
8715 #endif
8716 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8717 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8718 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8719 || side_effects_p (SET_SRC (PATTERN (insn))))
8720 live_insn = 1;
8721 }
8722 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8723 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8724 {
8725 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8726
8727 if (GET_CODE (elt) == SET)
8728 {
8729 if (GET_CODE (SET_DEST (elt)) == REG
8730 && SET_DEST (elt) == SET_SRC (elt))
8731 ;
8732
8733 #ifdef HAVE_cc0
8734 else if (GET_CODE (SET_DEST (elt)) == CC0
8735 && ! side_effects_p (SET_SRC (elt))
8736 && ((tem = next_nonnote_insn (insn)) == 0
8737 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8738 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8739 ;
8740 #endif
8741 else if (GET_CODE (SET_DEST (elt)) != REG
8742 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8743 || counts[REGNO (SET_DEST (elt))] != 0
8744 || side_effects_p (SET_SRC (elt)))
8745 live_insn = 1;
8746 }
8747 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8748 live_insn = 1;
8749 }
8750 else
8751 live_insn = 1;
8752
8753 /* If this is a dead insn, delete it and show that the registers in it
8754 aren't being used. */
8755
8756 if (! live_insn)
8757 {
8758 count_reg_usage (insn, counts, NULL_RTX, -1);
8759 delete_insn (insn);
8760 }
8761
8762 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8763 in_libcall = 0;
8764 }
8765 }