/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987, 88, 89, 92-5, 1996 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


#include "config.h"
/* Must precede rtl.h for FFS.  */
#include <stdio.h>

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"

#include <setjmp.h>

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge; so, at each label, we forget all that is
   known and start fresh.  This can be described as processing each
   basic block separately.  Note, however, that these are not quite
   the same as the basic blocks found by a later pass and used for
   data flow analysis and register packing.  We do not need to start fresh
   after a conditional jump instruction if there is no label there.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and several vectors together
   with "quantity numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `reg_qty' records what quantity a register is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to `max_reg'.
   If register N has not been assigned a quantity, reg_qty[N] will equal N.

   Quantity numbers below `max_reg' do not exist and none of the `qty_...'
   variables should be referenced with an index below `max_reg'.

   We also maintain a bidirectional chain of registers for each
   quantity number.  `qty_first_reg', `qty_last_reg',
   `reg_next_eqv' and `reg_prev_eqv' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.
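
   As an illustration (register and quantity numbers here are
   hypothetical): suppose max_reg is 100 and the block contains

	(set (reg 66) (reg 65))		;; reg 65 already holds qty 103
	(set (reg 67) (plus (reg 65) (const_int 4)))

   After the first insn, reg_qty[66] == reg_qty[65] == 103, and regs
   65 and 66 are both on the chain from qty_first_reg[103] to
   qty_last_reg[103].  The second insn loads reg 67 some other way,
   so reg 67 gets a fresh quantity: reg_qty[67] == next_qty++.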

   If two registers have the same quantity number, then REG expressions
   for both registers, in the mode recorded in `qty_mode', must be in
   the hash table and must be in the same equivalence class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the mode recorded for the
   quantity of one of the registers does not match the mode of those
   expressions.

Constants and quantity numbers:

   When a quantity has a known constant value, that value is stored
   in the appropriate element of qty_const.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the
   configuration macro CONST_COSTS and will often depend on the constant
   value.  In any event, expressions containing constants can be
   simplified by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate element
   of qty_const.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.
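
   For example (with hypothetical register numbers), after

	(set (reg:SI 70) (const_int 1))
	(set (reg:DI 71) (const_int 1))

   the same (const_int 1) rtx is recorded twice, once with mode SImode
   and once with mode DImode, and the two hash table entries are kept
   separate.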

Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and `qty_first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.
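
   Thus (again with hypothetical numbers), if reg 65 and reg 66 share
   quantity 103, then (plus (reg 65) (const_int 4)) and
   (plus (reg 66) (const_int 4)) hash identically, because each REG is
   hashed through reg_qty as quantity 103; a lookup of one expression
   can therefore find a table entry made from the other.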

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

     1. If the value changing is in memory, except in special cases
     ANYTHING referring to memory could be changed.  That is because
     nobody knows where a pointer does not point.
     The function `invalidate_memory' removes what is necessary.

     The special cases are when the address is constant or is
     a constant plus a fixed register such as the frame pointer
     or a static chain pointer.  When such addresses are stored in,
     we can tell exactly which other such addresses must be invalidated
     due to overlap.  `invalidate' does this.
     All expressions that refer to non-constant
     memory addresses are also invalidated.  `invalidate_memory' does this.

     2. If the value changing is a register, all expressions
     containing references to that register, and only those,
     must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   The vectors `reg_tick' and `reg_in_table' are used to detect this case.
   reg_tick[i] is incremented whenever a value is stored in register i.
   reg_in_table[i] holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value reg_tick[i] had
   when the references were entered.  If we want to enter a reference
   and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
   Until we want to enter a new entry, the mere fact that the two vectors
   don't match causes the entries to be ignored if anyone tries to match them.
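
   Concretely (hypothetical values): suppose reg_in_table[5] == 2 and
   reg_tick[5] == 2 when (plus (reg 5) (reg 6)) is entered.  A later
   store into reg 5 bumps reg_tick[5] to 3; the stale entry is left in
   the table but is ignored by lookups because 2 != 3, and it is only
   swept out, by remove_invalid_refs, if reg 5 is mentioned again.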

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, the vectors `reg_tick'
   and `reg_in_table' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */
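
/* As an illustration of related values: when, for example,
   (const (plus (symbol_ref "x") (const_int 4))) is entered,
   (symbol_ref "x") is entered as well, and the two table elements
   point at each other through their `related_value' fields.  If some
   register is known to hold the related expression, the original sum
   can be derived from that register plus an adjusted offset (see
   use_related_value).  */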

/* One plus largest register number used in this function.  */

static int max_reg;

/* Length of vectors indexed by quantity number.
   We know in advance we will not need a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Indexed by quantity number, gives the first (or last) (pseudo) register
   in the chain of registers that currently contain this quantity.  */

static int *qty_first_reg;
static int *qty_last_reg;

/* Indexed by quantity number, gives the mode of the quantity.  */

static enum machine_mode *qty_mode;

/* Indexed by quantity number, gives the rtx of the constant value of the
   quantity, or zero if it does not have a known value.
   A sum of the frame pointer (or arg pointer) plus a constant
   can also be entered here.  */

static rtx *qty_const;

/* Indexed by qty number, gives the insn that stored the constant value
   recorded in `qty_const'.  */

static rtx *qty_const_insn;

/* The next three variables are used to track when a comparison between a
   quantity and some constant or register has been passed.  In that case,
   we know the results of the comparison in case we see it again.  These
   variables record a comparison that is known to be true.  */

/* Indexed by qty number, gives the rtx code of a comparison with a known
   result involving this quantity.  If none, it is UNKNOWN.  */
static enum rtx_code *qty_comparison_code;

/* Indexed by qty number, gives the constant being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a constant, it is zero.  */

static rtx *qty_comparison_const;

/* Indexed by qty number, gives the quantity being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a register, it is -1.  */

static int *qty_comparison_qty;

#ifdef HAVE_cc0
/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the value last assigned to CC0.  If it should
   happen to be a constant, it is stored in preference to the actual
   assigned value.  In case it is a constant, we store the mode in which
   the constant should be interpreted.  */

static rtx prev_insn_cc0;
static enum machine_mode prev_insn_cc0_mode;
#endif

/* Previous actual insn.  0 if at first insn of basic block.  */

static rtx prev_insn;

/* Insn being scanned.  */

static rtx this_insn;

/* Indexed by (pseudo) register number, gives the quantity number
   of the register's current contents.  */

static int *reg_qty;

/* Indexed by (pseudo) register number, gives the number of the next (or
   previous) (pseudo) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If reg_qty[N] == N, reg_next_eqv[N] is undefined.  */

static int *reg_next_eqv;
static int *reg_prev_eqv;

/* Indexed by (pseudo) register number, gives the number of times
   that register has been altered in the current basic block.  */

static int *reg_tick;

/* Indexed by (pseudo) register number, gives the reg_tick value at which
   rtx's containing this register are valid in the hash table.
   If this does not equal the current reg_tick value, such expressions
   existing in the hash table are invalid.
   If this is -1, no expressions containing this register have been
   entered in the table.  */

static int *reg_in_table;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* A HARD_REG_SET containing all the hard registers that are invalidated
   by a CALL_INSN.  */

static HARD_REG_SET regs_invalidated_by_call;

/* Two vectors of ints: one of length max_reg whose elements are all -1,
   and one of max_reg + 500 elements (an approximation for max_qty)
   where element i contains i.
   These are used to initialize various other vectors fast.  */

static int *all_minus_one;
static int *consec_ints;

/* CUID of insn that starts the basic block currently being cse-processed.  */

static int cse_basic_block_start;

/* CUID of insn that ends the basic block currently being cse-processed.  */

static int cse_basic_block_end;

/* Vector mapping INSN_UIDs to cuids.
   The cuids are like uids but always increase monotonically.
   We use them to see whether a reg is used outside a given basic block.  */

static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */

#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])

/* Nonzero if cse has altered conditional jump insns
   in such a way that jump optimization should be redone.  */

static int cse_jumps_altered;

/* Nonzero if we put a LABEL_REF into the hash table.  Since we may have put
   it into an INSN without a REG_LABEL, we have to rerun jump after CSE
   to put in the note.  */
static int recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

#ifdef LOAD_EXTEND_OP

/* Scratch rtl used when looking for load-extended copy of a MEM.  */
static rtx memory_extend_rtx;
#endif

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* canon_hash stores 1 in hash_arg_in_struct
   if it notices a reference to memory that's part of a structure.  */

static int hash_arg_in_struct;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   is on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `in_struct' field is nonzero for elements that
   involve any reference to memory inside a structure or array.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */


struct table_elt
{
  rtx exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  enum machine_mode mode;
  char in_memory;
  char in_struct;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define NBUCKETS 31

/* Compute hash code of X in mode M.  Special-case the case where X is a
   pseudo register (hard registers may require `do_not_record' to be set).  */

#define HASH(X, M) \
 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
  ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
  : canon_hash (X, M) % NBUCKETS)
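
/* For illustration (hypothetical numbers): for a pseudo such as
   (reg:SI 65), HASH expands to
   (((unsigned) REG << 7) + (unsigned) reg_qty[65]) % NBUCKETS,
   so two pseudos with the same quantity number always land in the same
   bucket without a call to canon_hash.  Hard registers and non-REG
   expressions go through canon_hash, which may also set
   `do_not_record'.  */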

/* Determine whether register number N is considered a fixed register for CSE.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed,
   but not if it is an overlapping register.  */
#ifdef OVERLAPPING_REGNO_P
#define FIXED_REGNO_P(N) \
  (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
    || fixed_regs[N] || global_regs[N]) \
   && ! OVERLAPPING_REGNO_P ((N)))
#else
#define FIXED_REGNO_P(N) \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || fixed_regs[N] || global_regs[N])
#endif

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of 1 and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N) \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
   || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
   || ((N) < FIRST_PSEUDO_REGISTER \
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

/* A register is cheap if it is a user variable assigned to the register
   or if its register number always corresponds to a cheap register.  */

#define CHEAP_REG(N) \
  ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
   || CHEAP_REGNO (REGNO (N)))

#define COST(X) \
  (GET_CODE (X) == REG \
   ? (CHEAP_REG (X) ? 0 \
      : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
      : 2) \
   : rtx_cost (X, SET) * 2)
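
/* So, for example, COST of an ordinary pseudo is 1; of the frame
   pointer or another cheap register, 0; and of any other hard
   register, 2; while a non-register rtx costs twice its rtx_cost.
   This keeps registers preferred over recomputing an expression.  */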

/* Determine if the quantity number for register N represents a valid index
   into the `qty_...' variables.  */

#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))

static struct table_elt *table[NBUCKETS];

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Number of `struct table_elt' structures made so far for this function.  */

static int n_elements_made;

/* Maximum value `n_elements_made' has had so far in this compilation
   for functions previously processed.  */

static int max_elements_made;

/* Surviving equivalence class when two equivalence classes are merged
   by recording the effects of a jump in the last insn.  Zero if the
   last insn was not a conditional jump.  */

static struct table_elt *last_jump_equiv_class;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;

/* Bits describing what kind of values in memory must be invalidated
   for a particular instruction.  If all of the bits are zero,
   no memory refs need to be invalidated.  Each bit is more powerful
   than the preceding ones, and if a bit is set then the preceding
   bits are also set.

   Here is how the bits are set:
   Pushing onto the stack invalidates only the stack pointer,
   writing at a fixed address invalidates only variable addresses,
   writing in a structure element at variable address
     invalidates all but scalar variables,
   and writing in anything else at variable address invalidates everything.  */

struct write_data
{
  int sp : 1;			/* Invalidate stack pointer.  */
  int var : 1;			/* Invalidate variable addresses.  */
  int nonscalar : 1;		/* Invalidate all but scalar variables.  */
  int all : 1;			/* Invalidate all memory refs.  */
};
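
/* For instance (hypothetical cases): a push insn sets only `sp'; a
   store through (mem:SI (symbol_ref "x")) sets `var' (and therefore
   `sp'); a store into a structure element at a variable address sets
   `nonscalar' and the weaker bits; a store through an arbitrary
   pointer sets `all'.  */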

/* Define maximum length of a branch path.  */

#define PATHLENGTH 10

/* This data describes a block that will be processed by cse_basic_block.  */

struct cse_basic_block_data {
  /* Lowest CUID value of insns in block.  */
  int low_cuid;
  /* Highest CUID value of insns in block.  */
  int high_cuid;
  /* Total number of SETs in block.  */
  int nsets;
  /* Last insn in the block.  */
  rtx last;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current branch path, indicating which branches will be taken.  */
  struct branch_path {
    /* The branch insn.  */
    rtx branch;
    /* Whether it should be taken or not.  AROUND is the same as TAKEN
       except that it is used when the destination label is not preceded
       by a BARRIER.  */
    enum taken {TAKEN, NOT_TAKEN, AROUND} status;
  } path[PATHLENGTH];
};

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.  */

#define FIXED_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || (X) == arg_pointer_rtx \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
	   || XEXP (X, 0) == hard_frame_pointer_rtx \
	   || XEXP (X, 0) == arg_pointer_rtx \
	   || XEXP (X, 0) == virtual_stack_vars_rtx \
	   || XEXP (X, 0) == virtual_incoming_args_rtx)))

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
	   || XEXP (X, 0) == hard_frame_pointer_rtx \
	   || XEXP (X, 0) == arg_pointer_rtx \
	   || XEXP (X, 0) == virtual_stack_vars_rtx \
	   || XEXP (X, 0) == virtual_incoming_args_rtx)) \
   || (X) == stack_pointer_rtx \
   || (X) == virtual_stack_dynamic_rtx \
   || (X) == virtual_outgoing_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx \
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx \
	   || XEXP (X, 0) == virtual_outgoing_args_rtx)))

static void new_basic_block PROTO((void));
static void make_new_qty PROTO((int));
static void make_regs_eqv PROTO((int, int));
static void delete_reg_equiv PROTO((int));
static int mention_regs PROTO((rtx));
static int insert_regs PROTO((rtx, struct table_elt *, int));
static void free_element PROTO((struct table_elt *));
static void remove_from_table PROTO((struct table_elt *, unsigned));
static struct table_elt *get_element PROTO((void));
static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
       *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
static rtx lookup_as_function PROTO((rtx, enum rtx_code));
static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
				       enum machine_mode));
static void merge_equiv_classes PROTO((struct table_elt *,
				       struct table_elt *));
static void invalidate PROTO((rtx, enum machine_mode));
static void remove_invalid_refs PROTO((int));
static void rehash_using_reg PROTO((rtx));
static void invalidate_memory PROTO((struct write_data *));
static void invalidate_for_call PROTO((void));
static rtx use_related_value PROTO((rtx, struct table_elt *));
static unsigned canon_hash PROTO((rtx, enum machine_mode));
static unsigned safe_hash PROTO((rtx, enum machine_mode));
static int exp_equiv_p PROTO((rtx, rtx, int, int));
static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
						     HOST_WIDE_INT *,
						     HOST_WIDE_INT *));
static int refers_to_p PROTO((rtx, rtx));
static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
				  HOST_WIDE_INT));
static int cse_rtx_addr_varies_p PROTO((rtx));
static rtx canon_reg PROTO((rtx, rtx));
static void find_best_addr PROTO((rtx, rtx *));
static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
						 enum machine_mode *,
						 enum machine_mode *));
static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
				 rtx, rtx));
static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
				      rtx, rtx));
static rtx fold_rtx PROTO((rtx, rtx));
static rtx equiv_constant PROTO((rtx));
static void record_jump_equiv PROTO((rtx, int));
static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
				    rtx, rtx, int));
static void cse_insn PROTO((rtx, int));
static void note_mem_written PROTO((rtx, struct write_data *));
static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
static rtx cse_process_notes PROTO((rtx, rtx));
static void cse_around_loop PROTO((rtx));
static void invalidate_skipped_set PROTO((rtx, rtx));
static void invalidate_skipped_block PROTO((rtx));
static void cse_check_loop_start PROTO((rtx, rtx));
static void cse_set_around_loop PROTO((rtx, rtx, rtx));
static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
static void count_reg_usage PROTO((rtx, int *, rtx, int));

extern int rtx_equal_function_value_matters;
\f
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.  */

/* Return the right cost to give to an operation
   to make the cost of the corresponding register-to-register instruction
   N times that of a fast register-to-register instruction.  */

#define COSTS_N_INSNS(N) ((N) * 4 - 2)
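
/* For example, COSTS_N_INSNS (1) == 2, the cost of a simple
   register-to-register operation, and COSTS_N_INSNS (5) == 18, the
   default cost assigned to a multiply below.  */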

int
rtx_cost (x, outer_code)
     rtx x;
     enum rtx_code outer_code;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;
  register int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that RTX_COSTS can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Count multiplication by 2**n as a shift,
	 because if we are considering it, we would output it as a shift.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	total = 2;
      else
	total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in loop.c and combine.c as a marker.  */
      total = 0;
      break;
    case ASM_OPERANDS:
      /* We don't want these to be used in substitutions because
	 we have no way of validating the resulting insn.  So assign
	 anything containing an ASM_OPERANDS a very high cost.  */
      total = 1000;
      break;
    default:
      total = 2;
    }

  switch (code)
    {
    case REG:
      return ! CHEAP_REG (x);

    case SUBREG:
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2
			      + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      return 2;
#ifdef RTX_COSTS
      RTX_COSTS (x, code, outer_code);
#endif
      CONST_COSTS (x, code, outer_code);
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code);

  return total;
}
\f
/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block ()
{
  register int i;

  next_qty = max_reg;

  bzero ((char *) reg_tick, max_reg * sizeof (int));

  bcopy ((char *) all_minus_one, (char *) reg_in_table,
	 max_reg * sizeof (int));
  bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *this, *next;
      for (this = table[i]; this; this = next)
	{
	  next = this->next_same_hash;
	  free_element (this);
	}
    }

  bzero ((char *) table, sizeof table);

  prev_insn = 0;

#ifdef HAVE_cc0
  prev_insn_cc0 = 0;
#endif
}

/* Say that register REG contains a quantity not in any register before
   and initialize that quantity.  */

static void
make_new_qty (reg)
     register int reg;
{
  register int q;

  if (next_qty >= max_qty)
    abort ();

  q = reg_qty[reg] = next_qty++;
  qty_first_reg[q] = reg;
  qty_last_reg[q] = reg;
  qty_const[q] = qty_const_insn[q] = 0;
  qty_comparison_code[q] = UNKNOWN;

  reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
}

/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (new, old)
     register int new, old;
{
  register int lastr, firstr;
  register int q = reg_qty[old];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  if (! REGNO_QTY_VALID_P (old))
    abort ();

  reg_qty[new] = q;
  firstr = qty_first_reg[q];
  lastr = qty_last_reg[q];

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
	 that not only can they not be allocated by the compiler, but
	 they cannot be used in substitutions or canonicalizations
	 either.  */
      && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
      && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
	  || (new >= FIRST_PSEUDO_REGISTER
	      && (firstr < FIRST_PSEUDO_REGISTER
		  || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
		       || (uid_cuid[regno_first_uid[new]]
			   < cse_basic_block_start))
		      && (uid_cuid[regno_last_uid[new]]
			  > uid_cuid[regno_last_uid[firstr]]))))))
    {
      reg_prev_eqv[firstr] = new;
      reg_next_eqv[new] = firstr;
      reg_prev_eqv[new] = -1;
      qty_first_reg[q] = new;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
	 Otherwise, insert before any non-fixed hard regs that are at the
	 end.  Registers of class NO_REGS cannot be used as an
	 equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
	     && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
	     && new >= FIRST_PSEUDO_REGISTER)
	lastr = reg_prev_eqv[lastr];
      reg_next_eqv[new] = reg_next_eqv[lastr];
      if (reg_next_eqv[lastr] >= 0)
	reg_prev_eqv[reg_next_eqv[lastr]] = new;
      else
	qty_last_reg[q] = new;
      reg_next_eqv[lastr] = new;
      reg_prev_eqv[new] = lastr;
    }
}

/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (reg)
     register int reg;
{
  register int q = reg_qty[reg];
  register int p, n;

  /* If invalid, do nothing.  */
  if (q == reg)
    return;

  p = reg_prev_eqv[reg];
  n = reg_next_eqv[reg];

  if (n != -1)
    reg_prev_eqv[n] = p;
  else
    qty_last_reg[q] = p;
  if (p != -1)
    reg_next_eqv[p] = n;
  else
    qty_first_reg[q] = n;

  reg_qty[reg] = reg;
}

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (x)
     rtx x;
{
  register enum rtx_code code;
  register int i, j;
  register char *fmt;
  register int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      register int regno = REGNO (x);
      register int endregno
	= regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
		   : HARD_REGNO_NREGS (regno, GET_MODE (x)));
      int i;

      for (i = regno; i < endregno; i++)
	{
	  if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
	    remove_invalid_refs (i);

	  reg_in_table[i] = reg_tick[i];
	}

      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || GET_RTX_CLASS (code) == '<')
    {
      if (GET_CODE (XEXP (x, 0)) == REG
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
	if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
	  {
	    rehash_using_reg (XEXP (x, 0));
	    changed = 1;
	  }

      if (GET_CODE (XEXP (x, 1)) == REG
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
	if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
	  {
	    rehash_using_reg (XEXP (x, 1));
	    changed = 1;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (x, classp, modified)
     rtx x;
     struct table_elt *classp;
     int modified;
{
  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);

      /* If REGNO is in the equivalence table already but is of the
	 wrong mode for that equivalence, don't do anything here.  */

      if (REGNO_QTY_VALID_P (regno)
	  && qty_mode[reg_qty[regno]] != GET_MODE (x))
	return 0;

      if (modified || ! REGNO_QTY_VALID_P (regno))
	{
	  if (classp)
	    for (classp = classp->first_same_value;
		 classp != 0;
		 classp = classp->next_same_value)
	      if (GET_CODE (classp->exp) == REG
		  && GET_MODE (classp->exp) == GET_MODE (x))
		{
		  make_regs_eqv (regno, REGNO (classp->exp));
		  return 1;
		}

	  make_new_qty (regno);
	  qty_mode[reg_qty[regno]] = GET_MODE (x);
	  return 1;
	}

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
	   && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL_PTR, 0);
      mention_regs (SUBREG_REG (x));
      return 1;
    }
  else
    return mention_regs (x);
}
\f
/* Look in or update the hash table.  */

/* Put the element ELT on the list of free elements.  */

static void
free_element (elt)
     struct table_elt *elt;
{
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;
}

/* Return an element that is free for use.  */

static struct table_elt *
get_element ()
{
  struct table_elt *elt = free_element_chain;
  if (elt)
    {
      free_element_chain = elt->next_same_hash;
      return elt;
    }
  n_elements_made++;
  return (struct table_elt *) oballoc (sizeof (struct table_elt));
}

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (elt, hash)
     register struct table_elt *elt;
     unsigned hash;
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    register struct table_elt *prev = elt->prev_same_value;
    register struct table_elt *next = elt->next_same_value;

    if (next) next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
	register struct table_elt *newfirst = next;
	while (next)
	  {
	    next->first_same_value = newfirst;
	    next = next->next_same_value;
	  }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    register struct table_elt *prev = elt->prev_same_hash;
    register struct table_elt *next = elt->next_same_hash;

    if (next) next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
	/* This entry is not in the proper hash bucket.  This can happen
	   when two classes were merged by `merge_equiv_classes'.  Search
	   for the hash bucket that it heads.  This happens only very
	   rarely, so the cost is acceptable.  */
	for (hash = 0; hash < NBUCKETS; hash++)
	  if (table[hash] == elt)
	    table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      register struct table_elt *p = elt->related_value;
      while (p->related_value != elt)
	p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
	p->related_value = 0;
    }

  free_element (elt);
}

/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
			    || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  if (GET_CODE (x) == REG)
    {
      int regno = REGNO (x);
      /* Don't check the machine mode when comparing registers;
	 invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
	if (GET_CODE (p->exp) == REG
	    && REGNO (p->exp) == regno)
	  return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
	if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
	  return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (x, code)
     rtx x;
     enum rtx_code code;
{
  register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
					 GET_MODE (x));
  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    {
      if (GET_CODE (p->exp) == code
	  /* Make sure this is a valid entry in the table.  */
	  && exp_equiv_p (p->exp, p->exp, 1, 0))
	return p->exp;
    }

  return 0;
}
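
/* For example, lookup_as_function (x, CONST_INT) returns a CONST_INT
   rtx known to be equivalent to X, if X's equivalence class contains
   one, and 0 otherwise.  */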

/* Insert X in the hash table, assuming HASH is its hash code
   and CLASSP is an element of the class it should go in
   (or 0 if a new class should be made).
   It is inserted at the proper position to keep the class in
   the order cheapest first.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   For elements of equal cheapness, the most recent one
   goes in front, except that the first element in the list
   remains first unless a cheaper element is added.  The order of
   pseudo-registers does not matter, as canon_reg will be called to
   find the cheapest when a register is retrieved from the table.

   The in_memory field in the hash table element is set to 0.
   The caller must set it nonzero if appropriate.

   You should call insert_regs (X, CLASSP, MODIFY) before calling here,
   and if insert_regs returns a nonzero value
   you must then recompute its hash code before calling here.

   If necessary, update table showing constant values of quantities.  */

#define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
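
/* With these costs, a class ordered 0, 1, 1, 2 stays in that order; a
   new cost-1 element is linked in ahead of the existing cost-1
   elements (most recent first), but it displaces the head of the
   class only if it is strictly cheaper than the head.  */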

static struct table_elt *
insert (x, classp, hash, mode)
     register rtx x;
     register struct table_elt *classp;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *elt;

  /* If X is a register and we haven't made a quantity for it,
     something is wrong.  */
  if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
    abort ();

  /* If X is a hard register, show it is being put in the table.  */
  if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int regno = REGNO (x);
      int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
      int i;

      for (i = regno; i < endregno; i++)
	SET_HARD_REG_BIT (hard_regs_in_table, i);
    }

  /* If X is a label, show we recorded it.  */
  if (GET_CODE (x) == LABEL_REF
      || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
    recorded_label_ref = 1;

  /* Put an element for X into the right hash bucket.  */

  elt = get_element ();
  elt->exp = x;
  elt->cost = COST (x);
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->mode = mode;
  elt->is_const = (CONSTANT_P (x)
		   /* GNU C++ takes advantage of this for `this'
		      (and other const values).  */
		   || (RTX_UNCHANGING_P (x)
		       && GET_CODE (x) == REG
		       && REGNO (x) >= FIRST_PSEUDO_REGISTER)
		   || FIXED_BASE_PLUS_P (x));

  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      classp = classp->first_same_value;
      if (CHEAPER (elt, classp))
	/* Insert at the head of the class */
	{
	  register struct table_elt *p;
	  elt->next_same_value = classp;
	  classp->prev_same_value = elt;
	  elt->first_same_value = elt;

	  for (p = classp; p; p = p->next_same_value)
	    p->first_same_value = elt;
	}
      else
	{
	  /* Insert not at head of the class.  */
	  /* Put it after the last element cheaper than X.  */
	  register struct table_elt *p, *next;
	  for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
	       p = next);
	  /* Put it after P and before NEXT.  */
	  elt->next_same_value = next;
	  if (next)
	    next->prev_same_value = elt;
	  elt->prev_same_value = p;
	  p->next_same_value = elt;
	  elt->first_same_value = classp;
	}
    }
  else
    elt->first_same_value = elt;

  /* If this is a constant being set equivalent to a register or a register
     being set equivalent to a constant, note the constant equivalence.

     If this is a constant, it cannot be equivalent to a different constant,
     and a constant is the only thing that can be cheaper than a register.  So
     we know the register is the head of the class (before the constant was
     inserted).

     If this is a register that is not already known equivalent to a
     constant, we must check the entire class.

     If this is a register that is already known equivalent to an insn,
     update `qty_const_insn' to show that `this_insn' is the latest
     insn making that quantity equivalent to the constant.  */

  if (elt->is_const && classp && GET_CODE (classp->exp) == REG
      && GET_CODE (x) != REG)
    {
      qty_const[reg_qty[REGNO (classp->exp)]]
	= gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
      qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
    }

  else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
	   && ! elt->is_const)
    {
      register struct table_elt *p;

      for (p = classp; p != 0; p = p->next_same_value)
	{
	  if (p->is_const && GET_CODE (p->exp) != REG)
	    {
	      qty_const[reg_qty[REGNO (x)]]
		= gen_lowpart_if_possible (GET_MODE (x), p->exp);
	      qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
	      break;
	    }
	}
    }

  else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
	   && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
    qty_const_insn[reg_qty[REGNO (x)]] = this_insn;

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      unsigned subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
	{
	  /* Get the integer-free subexpression in the hash table.  */
	  subhash = safe_hash (subexp, mode) % NBUCKETS;
	  subelt = lookup (subexp, subhash, mode);
	  if (subelt == 0)
	    subelt = insert (subexp, NULL_PTR, subhash, mode);
	  /* Initialize SUBELT's circular chain if it has none.  */
	  if (subelt->related_value == 0)
	    subelt->related_value = subelt;
	  /* Find the element in the circular chain that precedes SUBELT.  */
	  subelt_prev = subelt;
	  while (subelt_prev->related_value != subelt)
	    subelt_prev = subelt_prev->related_value;
	  /* Put new ELT into SUBELT's circular chain just before SUBELT.
	     This way the element that follows SUBELT is the oldest one.  */
	  elt->related_value = subelt_prev->related_value;
	  subelt_prev->related_value = elt;
	}
    }

  return elt;
}
\f
/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
   CLASS2 into CLASS1.  This is done when we have reached an insn which makes
   the two classes equivalent.

   CLASS1 will be the surviving class; CLASS2 should not be used after this
   call.

   Any invalid entries in CLASS2 will not be copied.  */

static void
merge_equiv_classes (class1, class2)
     struct table_elt *class1, *class2;
{
  struct table_elt *elt, *next, *new;

  /* Ensure we start with the head of the classes.  */
  class1 = class1->first_same_value;
  class2 = class2->first_same_value;

  /* If they were already equal, forget it.  */
  if (class1 == class2)
    return;

  for (elt = class2; elt; elt = next)
    {
      unsigned hash;
      rtx exp = elt->exp;
      enum machine_mode mode = elt->mode;

      next = elt->next_same_value;

      /* Remove old entry, make a new one in CLASS1's class.
	 Don't do this for invalid entries as we cannot find their
	 hash code (it also isn't necessary).  */
      if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
	{
	  hash_arg_in_memory = 0;
	  hash_arg_in_struct = 0;
	  hash = HASH (exp, mode);

	  if (GET_CODE (exp) == REG)
	    delete_reg_equiv (REGNO (exp));

	  remove_from_table (elt, hash);

	  if (insert_regs (exp, class1, 0))
	    {
	      rehash_using_reg (exp);
	      hash = HASH (exp, mode);
	    }
	  new = insert (exp, class1, hash, mode);
	  new->in_memory = hash_arg_in_memory;
	  new->in_struct = hash_arg_in_struct;
	}
    }
}
\f
/* Remove from the hash table, or mark as invalid,
   all expressions whose values could be altered by storing in X.
   X is a register, a subreg, or a memory reference with nonvarying address
   (because, when a memory reference with a varying address is stored in,
   all memory references are removed by invalidate_memory
   so specific invalidation is superfluous).
   FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
   instead of just the amount indicated by the mode of X.  This is only used
   for bitfield stores into memory.

   A nonvarying address may be just a register or just
   a symbol reference, or it may be either of those plus
   a numeric offset.  */

static void
invalidate (x, full_mode)
     rtx x;
     enum machine_mode full_mode;
{
  register int i;
  register struct table_elt *p;
  rtx base;
  HOST_WIDE_INT start, end;

  /* If X is a register, dependencies on its contents
     are recorded through the qty number mechanism.
     Just change the qty number of the register,
     mark it as invalid for expressions that refer to it,
     and remove it itself.  */

  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);
      register unsigned hash = HASH (x, GET_MODE (x));

      /* Remove REGNO from any quantity list it might be on and indicate
	 that its value might have changed.  If it is a pseudo, remove its
	 entry from the hash table.

	 For a hard register, we do the first two actions above for any
	 additional hard registers corresponding to X.  Then, if any of these
	 registers are in the table, we must remove any REG entries that
	 overlap these registers.  */

      delete_reg_equiv (regno);
      reg_tick[regno]++;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  /* Because a register can be referenced in more than one mode,
	     we might have to remove more than one table entry.  */

	  struct table_elt *elt;

	  while ((elt = lookup_for_remove (x, hash, GET_MODE (x))) != 0)
	    remove_from_table (elt, hash);
	}
      else
	{
	  HOST_WIDE_INT in_table
	    = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
	  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
	  int tregno, tendregno;
	  register struct table_elt *p, *next;

	  CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);

	  for (i = regno + 1; i < endregno; i++)
	    {
	      in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
	      CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
	      delete_reg_equiv (i);
	      reg_tick[i]++;
	    }

	  if (in_table)
	    for (hash = 0; hash < NBUCKETS; hash++)
	      for (p = table[hash]; p; p = next)
		{
		  next = p->next_same_hash;

		  if (GET_CODE (p->exp) != REG
		      || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
		    continue;

		  tregno = REGNO (p->exp);
		  tendregno
		    = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
		  if (tendregno > regno && tregno < endregno)
		    remove_from_table (p, hash);
		}
	}

      return;
    }

  if (GET_CODE (x) == SUBREG)
    {
      if (GET_CODE (SUBREG_REG (x)) != REG)
	abort ();
      invalidate (SUBREG_REG (x), VOIDmode);
      return;
    }

  /* X is not a register; it must be a memory reference with
     a nonvarying address.  Remove all hash table elements
     that refer to overlapping pieces of memory.  */

  if (GET_CODE (x) != MEM)
    abort ();

  if (full_mode == VOIDmode)
    full_mode = GET_MODE (x);

  set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
				     &base, &start, &end);

  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *next;
      for (p = table[i]; p; p = next)
	{
	  next = p->next_same_hash;
	  if (refers_to_mem_p (p->exp, base, start, end))
	    remove_from_table (p, i);
	}
    }
}

/* Remove all expressions that refer to register REGNO,
   since they are already invalid, and we are about to
   mark that register valid again and don't want the old
   expressions to reappear as valid.  */

static void
remove_invalid_refs (regno)
     int regno;
{
  register int i;
  register struct table_elt *p, *next;

  for (i = 0; i < NBUCKETS; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (GET_CODE (p->exp) != REG
	    && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
	  remove_from_table (p, i);
      }
}
1643 \f
1644 /* Recompute the hash codes of any valid entries in the hash table that
1645 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1646
1647 This is called when we make a jump equivalence. */
1648
1649 static void
1650 rehash_using_reg (x)
1651 rtx x;
1652 {
1653 int i;
1654 struct table_elt *p, *next;
1655 unsigned hash;
1656
1657 if (GET_CODE (x) == SUBREG)
1658 x = SUBREG_REG (x);
1659
1660 /* If X is not a register or if the register is known not to be in any
1661 valid entries in the table, we have no work to do. */
1662
1663 if (GET_CODE (x) != REG
1664 || reg_in_table[REGNO (x)] < 0
1665 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1666 return;
1667
1668 /* Scan all hash chains looking for valid entries that mention X.
1669 If we find one and it is in the wrong hash chain, move it. We can skip
1670 objects that are registers, since they are handled specially. */
1671
1672 for (i = 0; i < NBUCKETS; i++)
1673 for (p = table[i]; p; p = next)
1674 {
1675 next = p->next_same_hash;
1676 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1677 && exp_equiv_p (p->exp, p->exp, 1, 0)
1678 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1679 {
1680 if (p->next_same_hash)
1681 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1682
1683 if (p->prev_same_hash)
1684 p->prev_same_hash->next_same_hash = p->next_same_hash;
1685 else
1686 table[i] = p->next_same_hash;
1687
1688 p->next_same_hash = table[hash];
1689 p->prev_same_hash = 0;
1690 if (table[hash])
1691 table[hash]->prev_same_hash = p;
1692 table[hash] = p;
1693 }
1694 }
1695 }
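
/* Concretely (register numbers invented): after a jump equivalence
   merges the quantities of regs 117 and 118, an entry such as
   (plus:SI (reg:SI 118) (const_int 4)) may now hash to a different
   bucket, because canon_hash folds in the qty number rather than the
   register number.  The relinking above moves such elements to the
   right chain without recomputing their classes.  */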
1696 \f
1697 /* Remove from the hash table all expressions that reference memory,
1698 or some of them as specified by *WRITES. */
1699
1700 static void
1701 invalidate_memory (writes)
1702 struct write_data *writes;
1703 {
1704 register int i;
1705 register struct table_elt *p, *next;
1706 int all = writes->all;
1707 int nonscalar = writes->nonscalar;
1708
1709 for (i = 0; i < NBUCKETS; i++)
1710 for (p = table[i]; p; p = next)
1711 {
1712 next = p->next_same_hash;
1713 if (p->in_memory
1714 && (all
1715 || (nonscalar && p->in_struct)
1716 || cse_rtx_addr_varies_p (p->exp)))
1717 remove_from_table (p, i);
1718 }
1719 }
1720 \f
1721 /* Remove from the hash table any expression that is a call-clobbered
1722 register. Also update the TICK values of those registers. */
1723
1724 static void
1725 invalidate_for_call ()
1726 {
1727 int regno, endregno;
1728 int i;
1729 unsigned hash;
1730 struct table_elt *p, *next;
1731 int in_table = 0;
1732
1733 /* Go through all the hard registers. For each that is clobbered in
1734 a CALL_INSN, remove the register from quantity chains and update
1735 reg_tick if defined. Also see if any of these registers is currently
1736 in the table. */
1737
1738 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1739 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1740 {
1741 delete_reg_equiv (regno);
1742 if (reg_tick[regno] >= 0)
1743 reg_tick[regno]++;
1744
1745 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1746 }
1747
1748 /* In the case where we have no call-clobbered hard registers in the
1749 table, we are done. Otherwise, scan the table and remove any
1750 entry that overlaps a call-clobbered register. */
1751
1752 if (in_table)
1753 for (hash = 0; hash < NBUCKETS; hash++)
1754 for (p = table[hash]; p; p = next)
1755 {
1756 next = p->next_same_hash;
1757
1758 if (GET_CODE (p->exp) != REG
1759 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1760 continue;
1761
1762 regno = REGNO (p->exp);
1763 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1764
1765 for (i = regno; i < endregno; i++)
1766 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1767 {
1768 remove_from_table (p, hash);
1769 break;
1770 }
1771 }
1772 }
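
/* For example, on a hypothetical target where regs 0-7 are
   call-clobbered and words are 32 bits, a table entry for (reg:DI 2)
   spans hard regs 2 and 3; the loop above removes it as soon as
   either register tests set in regs_invalidated_by_call.  */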
1773 \f
1774 /* Given an expression X of type CONST,
1775 and ELT which is its table entry (or 0 if it
1776 is not in the hash table),
1777 return an alternate expression for X as a register plus integer.
1778 If none can be found, return 0. */
1779
1780 static rtx
1781 use_related_value (x, elt)
1782 rtx x;
1783 struct table_elt *elt;
1784 {
1785 register struct table_elt *relt = 0;
1786 register struct table_elt *p, *q;
1787 HOST_WIDE_INT offset;
1788
1789 /* First, is there anything related known?
1790 If we have a table element, we can tell from that.
1791 Otherwise, must look it up. */
1792
1793 if (elt != 0 && elt->related_value != 0)
1794 relt = elt;
1795 else if (elt == 0 && GET_CODE (x) == CONST)
1796 {
1797 rtx subexp = get_related_value (x);
1798 if (subexp != 0)
1799 relt = lookup (subexp,
1800 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1801 GET_MODE (subexp));
1802 }
1803
1804 if (relt == 0)
1805 return 0;
1806
1807 /* Search all related table entries for one that has an
1808 equivalent register. */
1809
1810 p = relt;
1811 while (1)
1812 {
1813 /* This loop is strange in that it is executed in two different cases.
1814 The first is when X is already in the table. Then it is searching
1815 the RELATED_VALUE list of X's class (RELT). The second case is when
1816 X is not in the table. Then RELT points to a class for the related
1817 value.
1818
1819 Ensure that, whatever case we are in, we ignore classes that have
1820 the same value as X. */
1821
1822 if (rtx_equal_p (x, p->exp))
1823 q = 0;
1824 else
1825 for (q = p->first_same_value; q; q = q->next_same_value)
1826 if (GET_CODE (q->exp) == REG)
1827 break;
1828
1829 if (q)
1830 break;
1831
1832 p = p->related_value;
1833
1834 /* We went all the way around, so there is nothing to be found.
1835 Alternatively, perhaps RELT was in the table for some other reason
1836 and it has no related values recorded. */
1837 if (p == relt || p == 0)
1838 break;
1839 }
1840
1841 if (q == 0)
1842 return 0;
1843
1844 offset = (get_integer_term (x) - get_integer_term (p->exp));
1845 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1846 return plus_constant (q->exp, offset);
1847 }
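
/* Worked example (hypothetical rtl).  Suppose the table records that
   (reg:SI 117) is equivalent to

       (const (plus (symbol_ref "tbl") (const_int 4)))

   and X is (const (plus (symbol_ref "tbl") (const_int 12))).  The two
   CONSTs sit on the same related_value chain, so the loop above finds
   the class containing the register and the function returns

       (plus:SI (reg:SI 117) (const_int 8))

   which is usually cheaper than materializing the full constant.  */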
1848 \f
1849 /* Hash an rtx. We are careful to make sure the value is never negative.
1850 Equivalent registers hash identically.
1851 MODE is used in hashing for CONST_INTs only;
1852 otherwise the mode of X is used.
1853
1854 Store 1 in do_not_record if any subexpression is volatile.
1855
1856 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1857 which does not have the RTX_UNCHANGING_P bit set.
1858 In this case, also store 1 in hash_arg_in_struct
1859 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1860
1861 Note that cse_insn knows that the hash code of a MEM expression
1862 is just (int) MEM plus the hash code of the address. */
1863
1864 static unsigned
1865 canon_hash (x, mode)
1866 rtx x;
1867 enum machine_mode mode;
1868 {
1869 register int i, j;
1870 register unsigned hash = 0;
1871 register enum rtx_code code;
1872 register char *fmt;
1873
1874 /* repeat is used to turn tail-recursion into iteration. */
1875 repeat:
1876 if (x == 0)
1877 return hash;
1878
1879 code = GET_CODE (x);
1880 switch (code)
1881 {
1882 case REG:
1883 {
1884 register int regno = REGNO (x);
1885
1886 /* On some machines, we can't record any non-fixed hard register,
1887 because extending its life will cause reload problems. We
1888 consider ap, fp, and sp to be fixed for this purpose.
1889 On all machines, we can't record any global registers. */
1890
1891 if (regno < FIRST_PSEUDO_REGISTER
1892 && (global_regs[regno]
1893 #ifdef SMALL_REGISTER_CLASSES
1894 || (! fixed_regs[regno]
1895 && regno != FRAME_POINTER_REGNUM
1896 && regno != HARD_FRAME_POINTER_REGNUM
1897 && regno != ARG_POINTER_REGNUM
1898 && regno != STACK_POINTER_REGNUM)
1899 #endif
1900 ))
1901 {
1902 do_not_record = 1;
1903 return 0;
1904 }
1905 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1906 return hash;
1907 }
1908
1909 case CONST_INT:
1910 {
1911 unsigned HOST_WIDE_INT tem = INTVAL (x);
1912 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1913 return hash;
1914 }
1915
1916 case CONST_DOUBLE:
1917 /* This is like the general case, except that it only counts
1918 the integers representing the constant. */
1919 hash += (unsigned) code + (unsigned) GET_MODE (x);
1920 if (GET_MODE (x) != VOIDmode)
1921 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1922 {
1923 unsigned tem = XINT (x, i);
1924 hash += tem;
1925 }
1926 else
1927 hash += ((unsigned) CONST_DOUBLE_LOW (x)
1928 + (unsigned) CONST_DOUBLE_HIGH (x));
1929 return hash;
1930
1931 /* Assume there is only one rtx object for any given label. */
1932 case LABEL_REF:
1933 hash
1934 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1935 return hash;
1936
1937 case SYMBOL_REF:
1938 hash
1939 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1940 return hash;
1941
1942 case MEM:
1943 if (MEM_VOLATILE_P (x))
1944 {
1945 do_not_record = 1;
1946 return 0;
1947 }
1948 if (! RTX_UNCHANGING_P (x))
1949 {
1950 hash_arg_in_memory = 1;
1951 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1952 }
1953 /* Now that we have already found this special case,
1954 might as well speed it up as much as possible. */
1955 hash += (unsigned) MEM;
1956 x = XEXP (x, 0);
1957 goto repeat;
1958
1959 case PRE_DEC:
1960 case PRE_INC:
1961 case POST_DEC:
1962 case POST_INC:
1963 case PC:
1964 case CC0:
1965 case CALL:
1966 case UNSPEC_VOLATILE:
1967 do_not_record = 1;
1968 return 0;
1969
1970 case ASM_OPERANDS:
1971 if (MEM_VOLATILE_P (x))
1972 {
1973 do_not_record = 1;
1974 return 0;
1975 }
1976 }
1977
1978 i = GET_RTX_LENGTH (code) - 1;
1979 hash += (unsigned) code + (unsigned) GET_MODE (x);
1980 fmt = GET_RTX_FORMAT (code);
1981 for (; i >= 0; i--)
1982 {
1983 if (fmt[i] == 'e')
1984 {
1985 rtx tem = XEXP (x, i);
1986
1987 /* If we are about to do the last recursive call
1988 needed at this level, change it into iteration.
1989 This function is called enough to be worth it. */
1990 if (i == 0)
1991 {
1992 x = tem;
1993 goto repeat;
1994 }
1995 hash += canon_hash (tem, 0);
1996 }
1997 else if (fmt[i] == 'E')
1998 for (j = 0; j < XVECLEN (x, i); j++)
1999 hash += canon_hash (XVECEXP (x, i, j), 0);
2000 else if (fmt[i] == 's')
2001 {
2002 register unsigned char *p = (unsigned char *) XSTR (x, i);
2003 if (p)
2004 while (*p)
2005 hash += *p++;
2006 }
2007 else if (fmt[i] == 'i')
2008 {
2009 register unsigned tem = XINT (x, i);
2010 hash += tem;
2011 }
2012 else
2013 abort ();
2014 }
2015 return hash;
2016 }
2017
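/* Small usage sketch (the register numbers are invented).  Equivalent
   registers hash identically because the REG case above folds in the
   qty number, not the register number.  */
#if 0
  {
    rtx r1 = gen_rtx (REG, SImode, 117);
    rtx r2 = gen_rtx (REG, SImode, 118);

    do_not_record = 0;
    /* If regs 117 and 118 share a quantity, both land in the same
       bucket of `table'.  */
    if (canon_hash (r1, SImode) % NBUCKETS
        == canon_hash (r2, SImode) % NBUCKETS)
      ;
  }
#endif
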
2018 /* Like canon_hash but with no side effects. */
2019
2020 static unsigned
2021 safe_hash (x, mode)
2022 rtx x;
2023 enum machine_mode mode;
2024 {
2025 int save_do_not_record = do_not_record;
2026 int save_hash_arg_in_memory = hash_arg_in_memory;
2027 int save_hash_arg_in_struct = hash_arg_in_struct;
2028 unsigned hash = canon_hash (x, mode);
2029 hash_arg_in_memory = save_hash_arg_in_memory;
2030 hash_arg_in_struct = save_hash_arg_in_struct;
2031 do_not_record = save_do_not_record;
2032 return hash;
2033 }
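
/* safe_hash is for queries made while some other expression is being
   hashed, e.g. the lookup in use_related_value above:

       safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS

   A plain canon_hash there could leave do_not_record set and thereby
   poison the hash of the expression the caller is in the middle of
   processing.  */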
2034 \f
2035 /* Return 1 iff X and Y would canonicalize into the same thing,
2036 without actually constructing the canonicalization of either one.
2037 If VALIDATE is nonzero,
2038 we assume X is an expression being processed from the rtl
2039 and Y was found in the hash table. We check register refs
2040 in Y for being marked as valid.
2041
2042 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2043 that is known to be in the register. Ordinarily, we don't allow them
2044 to match, because letting them match would cause unpredictable results
2045 in all the places that search a hash table chain for an equivalent
2046 for a given value. A possible equivalent that has different structure
2047 has its hash code computed from different data. Whether the hash code
2048 is the same as that of the given value is pure luck. */
2049
2050 static int
2051 exp_equiv_p (x, y, validate, equal_values)
2052 rtx x, y;
2053 int validate;
2054 int equal_values;
2055 {
2056 register int i, j;
2057 register enum rtx_code code;
2058 register char *fmt;
2059
2060 /* Note: it is incorrect to assume an expression is equivalent to itself
2061 if VALIDATE is nonzero. */
2062 if (x == y && !validate)
2063 return 1;
2064 if (x == 0 || y == 0)
2065 return x == y;
2066
2067 code = GET_CODE (x);
2068 if (code != GET_CODE (y))
2069 {
2070 if (!equal_values)
2071 return 0;
2072
2073 /* If X is a constant and Y is a register or vice versa, they may be
2074 equivalent. We only have to validate if Y is a register. */
2075 if (CONSTANT_P (x) && GET_CODE (y) == REG
2076 && REGNO_QTY_VALID_P (REGNO (y))
2077 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2078 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2079 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2080 return 1;
2081
2082 if (CONSTANT_P (y) && code == REG
2083 && REGNO_QTY_VALID_P (REGNO (x))
2084 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2085 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2086 return 1;
2087
2088 return 0;
2089 }
2090
2091 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2092 if (GET_MODE (x) != GET_MODE (y))
2093 return 0;
2094
2095 switch (code)
2096 {
2097 case PC:
2098 case CC0:
2099 return x == y;
2100
2101 case CONST_INT:
2102 return INTVAL (x) == INTVAL (y);
2103
2104 case LABEL_REF:
2105 return XEXP (x, 0) == XEXP (y, 0);
2106
2107 case SYMBOL_REF:
2108 return XSTR (x, 0) == XSTR (y, 0);
2109
2110 case REG:
2111 {
2112 int regno = REGNO (y);
2113 int endregno
2114 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2115 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2116 int i;
2117
2118 /* If the quantities are not the same, the expressions are not
2119 equivalent. If they are and we are not to validate, they
2120 are equivalent. Otherwise, ensure all regs are up-to-date. */
2121
2122 if (reg_qty[REGNO (x)] != reg_qty[regno])
2123 return 0;
2124
2125 if (! validate)
2126 return 1;
2127
2128 for (i = regno; i < endregno; i++)
2129 if (reg_in_table[i] != reg_tick[i])
2130 return 0;
2131
2132 return 1;
2133 }
2134
2135 /* For commutative operations, check both orders. */
2136 case PLUS:
2137 case MULT:
2138 case AND:
2139 case IOR:
2140 case XOR:
2141 case NE:
2142 case EQ:
2143 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2144 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2145 validate, equal_values))
2146 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2147 validate, equal_values)
2148 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2149 validate, equal_values)));
2150 }
2151
2152 /* Compare the elements. If any pair of corresponding elements
2153 fails to match, return 0 for the whole thing. */
2154
2155 fmt = GET_RTX_FORMAT (code);
2156 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2157 {
2158 switch (fmt[i])
2159 {
2160 case 'e':
2161 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2162 return 0;
2163 break;
2164
2165 case 'E':
2166 if (XVECLEN (x, i) != XVECLEN (y, i))
2167 return 0;
2168 for (j = 0; j < XVECLEN (x, i); j++)
2169 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2170 validate, equal_values))
2171 return 0;
2172 break;
2173
2174 case 's':
2175 if (strcmp (XSTR (x, i), XSTR (y, i)))
2176 return 0;
2177 break;
2178
2179 case 'i':
2180 if (XINT (x, i) != XINT (y, i))
2181 return 0;
2182 break;
2183
2184 case 'w':
2185 if (XWINT (x, i) != XWINT (y, i))
2186 return 0;
2187 break;
2188
2189 case '0':
2190 break;
2191
2192 default:
2193 abort ();
2194 }
2195 }
2196
2197 return 1;
2198 }
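
/* Example of the EQUAL_VALUES case (register number and constant are
   invented).  If reg 117's quantity records qty_const (const_int 10)
   in SImode, then

       exp_equiv_p (GEN_INT (10), gen_rtx (REG, SImode, 117), 1, 1)

   returns 1 even though the codes differ, provided the register is
   still valid in the table (reg_in_table matches reg_tick).  With
   EQUAL_VALUES zero, the same call returns 0 at the code check.  */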
2199 \f
2200 /* Return 1 iff any subexpression of X matches Y.
2201 Here we do not require that X or Y be valid (for registers referred to)
2202 for being in the hash table. */
2203
2204 static int
2205 refers_to_p (x, y)
2206 rtx x, y;
2207 {
2208 register int i;
2209 register enum rtx_code code;
2210 register char *fmt;
2211
2212 repeat:
2213 if (x == y)
2214 return 1;
2215 if (x == 0 || y == 0)
2216 return 0;
2217
2218 code = GET_CODE (x);
2219 /* If X as a whole has the same code as Y, they may match.
2220 If so, return 1. */
2221 if (code == GET_CODE (y))
2222 {
2223 if (exp_equiv_p (x, y, 0, 1))
2224 return 1;
2225 }
2226
2227 /* X does not match, so try its subexpressions. */
2228
2229 fmt = GET_RTX_FORMAT (code);
2230 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2231 if (fmt[i] == 'e')
2232 {
2233 if (i == 0)
2234 {
2235 x = XEXP (x, 0);
2236 goto repeat;
2237 }
2238 else
2239 if (refers_to_p (XEXP (x, i), y))
2240 return 1;
2241 }
2242 else if (fmt[i] == 'E')
2243 {
2244 int j;
2245 for (j = 0; j < XVECLEN (x, i); j++)
2246 if (refers_to_p (XVECEXP (x, i, j), y))
2247 return 1;
2248 }
2249
2250 return 0;
2251 }
2252 \f
2253 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2254 set PBASE, PSTART, and PEND which correspond to the base of the address,
2255 the starting offset, and ending offset respectively.
2256
2257 ADDR is known to be a nonvarying address. */
2258
2259 /* ??? Despite what the comments say, this function is in fact frequently
2260 passed varying addresses. This does not appear to cause any problems. */
2261
2262 static void
2263 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2264 rtx addr;
2265 int size;
2266 rtx *pbase;
2267 HOST_WIDE_INT *pstart, *pend;
2268 {
2269 rtx base;
2270 HOST_WIDE_INT start, end;
2271
2272 base = addr;
2273 start = 0;
2274 end = 0;
2275
2276 /* Registers with nonvarying addresses usually have constant equivalents;
2277 but the frame pointer register is also possible. */
2278 if (GET_CODE (base) == REG
2279 && qty_const != 0
2280 && REGNO_QTY_VALID_P (REGNO (base))
2281 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2282 && qty_const[reg_qty[REGNO (base)]] != 0)
2283 base = qty_const[reg_qty[REGNO (base)]];
2284 else if (GET_CODE (base) == PLUS
2285 && GET_CODE (XEXP (base, 1)) == CONST_INT
2286 && GET_CODE (XEXP (base, 0)) == REG
2287 && qty_const != 0
2288 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2289 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2290 == GET_MODE (XEXP (base, 0)))
2291 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2292 {
2293 start = INTVAL (XEXP (base, 1));
2294 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2295 }
2296 /* This can happen as the result of virtual register instantiation,
2297 if the initial offset is too large to be a valid address. */
2298 else if (GET_CODE (base) == PLUS
2299 && GET_CODE (XEXP (base, 0)) == REG
2300 && GET_CODE (XEXP (base, 1)) == REG
2301 && qty_const != 0
2302 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2303 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2304 == GET_MODE (XEXP (base, 0)))
2305 && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
2306 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2307 && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
2308 == GET_MODE (XEXP (base, 1)))
2309 && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
2310 {
2311 rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
2312 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2313
2314 /* One of the two values must be a constant. */
2315 if (GET_CODE (base) != CONST_INT)
2316 {
2317 if (GET_CODE (tem) != CONST_INT)
2318 abort ();
2319 start = INTVAL (tem);
2320 }
2321 else
2322 {
2323 start = INTVAL (base);
2324 base = tem;
2325 }
2326 }
2327
2328 /* Handle everything that we can find inside an address that has been
2329 viewed as constant. */
2330
2331 while (1)
2332 {
2333 /* If no part of this switch does a "continue", the code outside
2334 will exit this loop. */
2335
2336 switch (GET_CODE (base))
2337 {
2338 case LO_SUM:
2339 /* By definition, operand1 of a LO_SUM is the associated constant
2340 address. Use the associated constant address as the base
2341 instead. */
2342 base = XEXP (base, 1);
2343 continue;
2344
2345 case CONST:
2346 /* Strip off CONST. */
2347 base = XEXP (base, 0);
2348 continue;
2349
2350 case PLUS:
2351 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2352 {
2353 start += INTVAL (XEXP (base, 1));
2354 base = XEXP (base, 0);
2355 continue;
2356 }
2357 break;
2358
2359 case AND:
2360 /* Handle the case of an AND which is the negative of a power of
2361 two. This is used to represent unaligned memory operations. */
2362 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2363 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2364 {
2365 set_nonvarying_address_components (XEXP (base, 0), size,
2366 pbase, pstart, pend);
2367
2368 /* Assume the worst misalignment. START is affected, but not
2369 END, so compensate by adjusting SIZE. Don't lose any
2370 constant we already had. */
2371
2372 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2373 start += *pstart + INTVAL (XEXP (base, 1)) + 1;
2374 end += *pend;
2375 base = *pbase;
2376 }
2377 break;
2378 }
2379
2380 break;
2381 }
2382
2383 if (GET_CODE (base) == CONST_INT)
2384 {
2385 start += INTVAL (base);
2386 base = const0_rtx;
2387 }
2388
2389 end = start + size;
2390
2391 /* Set the return values. */
2392 *pbase = base;
2393 *pstart = start;
2394 *pend = end;
2395 }
2396
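/* Worked example (offsets invented).  For a 4 byte reference to

       (plus:SI (reg:SI fp) (const_int 8))

   where the frame pointer has no recorded constant equivalent, the
   PLUS case in the loop above peels off the offset, giving

       *PBASE = (reg:SI fp), *PSTART = 8, *PEND = 12.

   Two references can conflict only when their bases match and their
   [START, END) ranges overlap; see refers_to_mem_p below.  */
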
2397 /* Return 1 iff any subexpression of X refers to memory
2398 at an address of BASE plus some offset
2399 such that any of the bytes' offsets fall between START (inclusive)
2400 and END (exclusive).
2401
2402 The value is undefined if X is a varying address (as determined by
2403 cse_rtx_addr_varies_p). This function is not used in such cases.
2404
2405 When used in the cse pass, `qty_const' is nonzero, and it is used
2406 to treat an address that is a register with a known constant value
2407 as if it were that constant value.
2408 In the loop pass, `qty_const' is zero, so this is not done. */
2409
2410 static int
2411 refers_to_mem_p (x, base, start, end)
2412 rtx x, base;
2413 HOST_WIDE_INT start, end;
2414 {
2415 register HOST_WIDE_INT i;
2416 register enum rtx_code code;
2417 register char *fmt;
2418
2419 repeat:
2420 if (x == 0)
2421 return 0;
2422
2423 code = GET_CODE (x);
2424 if (code == MEM)
2425 {
2426 register rtx addr = XEXP (x, 0); /* Get the address. */
2427 rtx mybase;
2428 HOST_WIDE_INT mystart, myend;
2429
2430 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2431 &mybase, &mystart, &myend);
2432
2433
2434 /* refers_to_mem_p is never called with varying addresses.
2435 If the base addresses are not equal, there is no chance
2436 of the memory addresses conflicting. */
2437 if (! rtx_equal_p (mybase, base))
2438 return 0;
2439
2440 return myend > start && mystart < end;
2441 }
2442
2443 /* X does not match, so try its subexpressions. */
2444
2445 fmt = GET_RTX_FORMAT (code);
2446 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2447 if (fmt[i] == 'e')
2448 {
2449 if (i == 0)
2450 {
2451 x = XEXP (x, 0);
2452 goto repeat;
2453 }
2454 else
2455 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2456 return 1;
2457 }
2458 else if (fmt[i] == 'E')
2459 {
2460 int j;
2461 for (j = 0; j < XVECLEN (x, i); j++)
2462 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2463 return 1;
2464 }
2465
2466 return 0;
2467 }
2468
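/* The conflict test above treats [START, END) as half-open.  For
   example (offsets invented), with equal bases and an invalidated
   4 byte store at START = 8, END = 12:

       a load with mystart = 8,  myend = 12  conflicts (12 > 8 && 8 < 12);
       a load with mystart = 12, myend = 16  does not  (12 < 12 fails).  */
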
2469 /* Nonzero if X refers to memory at a varying address;
2470 except that a register which has at the moment a known constant value
2471 isn't considered variable. */
2472
2473 static int
2474 cse_rtx_addr_varies_p (x)
2475 rtx x;
2476 {
2477 /* We need not check for X and the equivalence class being of the same
2478 mode because if X is equivalent to a constant in some mode, it
2479 doesn't vary in any mode. */
2480
2481 if (GET_CODE (x) == MEM
2482 && GET_CODE (XEXP (x, 0)) == REG
2483 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2484 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2485 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2486 return 0;
2487
2488 if (GET_CODE (x) == MEM
2489 && GET_CODE (XEXP (x, 0)) == PLUS
2490 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2491 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2492 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2493 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2494 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2495 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2496 return 0;
2497
2498 /* This can happen as the result of virtual register instantiation, if
2499 the initial constant is too large to be a valid address. This gives
2500 us a three-instruction sequence: load the large offset into a register,
2501 load fp minus a constant into a register, then a MEM which is the
2502 sum of the two `constant' registers. */
2503 if (GET_CODE (x) == MEM
2504 && GET_CODE (XEXP (x, 0)) == PLUS
2505 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2506 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
2507 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2508 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2509 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2510 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
2511 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
2512 && (GET_MODE (XEXP (XEXP (x, 0), 1))
2513 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2514 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2515 return 0;
2516
2517 return rtx_addr_varies_p (x);
2518 }
2519 \f
2520 /* Canonicalize an expression:
2521 replace each register reference inside it
2522 with the "oldest" equivalent register.
2523
2524 If INSN is non-zero and we are replacing a pseudo with a hard register
2525 or vice versa, validate_change is used to ensure that INSN remains valid
2526 after we make our substitution. The calls are made with IN_GROUP non-zero
2527 so apply_change_group must be called upon the outermost return from this
2528 function (unless INSN is zero). The result of apply_change_group can
2529 generally be discarded since the changes we are making are optional. */
2530
2531 static rtx
2532 canon_reg (x, insn)
2533 rtx x;
2534 rtx insn;
2535 {
2536 register int i;
2537 register enum rtx_code code;
2538 register char *fmt;
2539
2540 if (x == 0)
2541 return x;
2542
2543 code = GET_CODE (x);
2544 switch (code)
2545 {
2546 case PC:
2547 case CC0:
2548 case CONST:
2549 case CONST_INT:
2550 case CONST_DOUBLE:
2551 case SYMBOL_REF:
2552 case LABEL_REF:
2553 case ADDR_VEC:
2554 case ADDR_DIFF_VEC:
2555 return x;
2556
2557 case REG:
2558 {
2559 register int first;
2560
2561 /* Never replace a hard reg, because hard regs can appear
2562 in more than one machine mode, and we must preserve the mode
2563 of each occurrence. Also, some hard regs appear in
2564 MEMs that are shared and mustn't be altered. Don't try to
2565 replace any reg that maps to a reg of class NO_REGS. */
2566 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2567 || ! REGNO_QTY_VALID_P (REGNO (x)))
2568 return x;
2569
2570 first = qty_first_reg[reg_qty[REGNO (x)]];
2571 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2572 : REGNO_REG_CLASS (first) == NO_REGS ? x
2573 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2574 }
2575 }
2576
2577 fmt = GET_RTX_FORMAT (code);
2578 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2579 {
2580 register int j;
2581
2582 if (fmt[i] == 'e')
2583 {
2584 rtx new = canon_reg (XEXP (x, i), insn);
2585
2586 /* If replacing pseudo with hard reg or vice versa, ensure the
2587 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2588 if (insn != 0 && new != 0
2589 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2590 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2591 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2592 || insn_n_dups[recog_memoized (insn)] > 0))
2593 validate_change (insn, &XEXP (x, i), new, 1);
2594 else
2595 XEXP (x, i) = new;
2596 }
2597 else if (fmt[i] == 'E')
2598 for (j = 0; j < XVECLEN (x, i); j++)
2599 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2600 }
2601
2602 return x;
2603 }
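
/* Usage sketch (register numbers invented).  If pseudo 120 was copied
   from pseudo 117 earlier in the block, both share a quantity whose
   qty_first_reg is 117, so

       canon_reg (gen_rtx (REG, SImode, 120), insn)

   returns regno_reg_rtx[117].  When INSN is nonzero, the caller must
   eventually call apply_change_group, since substitutions involving
   hard registers are only recorded tentatively.  */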
2604 \f
2605 /* LOC is a location within INSN that is an operand address (the contents of
2606 a MEM). Find the best equivalent address to use that is valid for this
2607 insn.
2608
2609 On most CISC machines, complicated address modes are costly, and rtx_cost
2610 is a good approximation for that cost. However, most RISC machines have
2611 only a few (usually only one) memory reference formats. If an address is
2612 valid at all, it is often just as cheap as any other address. Hence, for
2613 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2614 costs of various addresses. For two addresses of equal cost, choose the one
2615 with the highest `rtx_cost' value as that has the potential of eliminating
2616 the most insns. For equal costs, we choose the first in the equivalence
2617 class. Note that we ignore the fact that pseudo registers are cheaper
2618 than hard registers here because we would also prefer the pseudo registers.
2619 */
2620
2621 static void
2622 find_best_addr (insn, loc)
2623 rtx insn;
2624 rtx *loc;
2625 {
2626 struct table_elt *elt, *p;
2627 rtx addr = *loc;
2628 int our_cost;
2629 int found_better = 1;
2630 int save_do_not_record = do_not_record;
2631 int save_hash_arg_in_memory = hash_arg_in_memory;
2632 int save_hash_arg_in_struct = hash_arg_in_struct;
2633 int addr_volatile;
2634 int regno;
2635 unsigned hash;
2636
2637 /* Do not try to replace constant addresses or addresses of local and
2638 argument slots. These MEM expressions are made only once and inserted
2639 in many instructions, as well as being used to control symbol table
2640 output. It is not safe to clobber them.
2641
2642 There are some uncommon cases where the address is already in a register
2643 for some reason, but we cannot take advantage of that because we have
2644 no easy way to unshare the MEM. In addition, looking up all stack
2645 addresses is costly. */
2646 if ((GET_CODE (addr) == PLUS
2647 && GET_CODE (XEXP (addr, 0)) == REG
2648 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2649 && (regno = REGNO (XEXP (addr, 0)),
2650 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2651 || regno == ARG_POINTER_REGNUM))
2652 || (GET_CODE (addr) == REG
2653 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2654 || regno == HARD_FRAME_POINTER_REGNUM
2655 || regno == ARG_POINTER_REGNUM))
2656 || CONSTANT_ADDRESS_P (addr))
2657 return;
2658
2659 /* If this address is not simply a register, try to fold it. This will
2660 sometimes simplify the expression. Many simplifications
2661 will not be valid, but some, usually applying the associative rule, will
2662 be valid and produce better code. */
2663 if (GET_CODE (addr) != REG)
2664 {
2665 rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
2666
2667 if (1
2668 #ifdef ADDRESS_COST
2669 && (ADDRESS_COST (folded) < ADDRESS_COST (addr)
2670 || (ADDRESS_COST (folded) == ADDRESS_COST (addr)
2671 && rtx_cost (folded) > rtx_cost (addr)))
2672 #else
2673 && rtx_cost (folded) < rtx_cost (addr)
2674 #endif
2675 && validate_change (insn, loc, folded, 0))
2676 addr = folded;
2677 }
2678
2679 /* If this address is not in the hash table, we can't look for equivalences
2680 of the whole address. Also, ignore if volatile. */
2681
2682 do_not_record = 0;
2683 hash = HASH (addr, Pmode);
2684 addr_volatile = do_not_record;
2685 do_not_record = save_do_not_record;
2686 hash_arg_in_memory = save_hash_arg_in_memory;
2687 hash_arg_in_struct = save_hash_arg_in_struct;
2688
2689 if (addr_volatile)
2690 return;
2691
2692 elt = lookup (addr, hash, Pmode);
2693
2694 #ifndef ADDRESS_COST
2695 if (elt)
2696 {
2697 our_cost = elt->cost;
2698
2699 /* Find the lowest cost below ours that works. */
2700 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2701 if (elt->cost < our_cost
2702 && (GET_CODE (elt->exp) == REG
2703 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2704 && validate_change (insn, loc,
2705 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2706 return;
2707 }
2708 #else
2709
2710 if (elt)
2711 {
2712 /* We need to find the best (under the criteria documented above) entry
2713 in the class that is valid. We use the `flag' field to indicate
2714 choices that were invalid and iterate until we can't find a better
2715 one that hasn't already been tried. */
2716
2717 for (p = elt->first_same_value; p; p = p->next_same_value)
2718 p->flag = 0;
2719
2720 while (found_better)
2721 {
2722 int best_addr_cost = ADDRESS_COST (*loc);
2723 int best_rtx_cost = (elt->cost + 1) >> 1;
2724 struct table_elt *best_elt = elt;
2725
2726 found_better = 0;
2727 for (p = elt->first_same_value; p; p = p->next_same_value)
2728 if (! p->flag
2729 && (GET_CODE (p->exp) == REG
2730 || exp_equiv_p (p->exp, p->exp, 1, 0))
2731 && (ADDRESS_COST (p->exp) < best_addr_cost
2732 || (ADDRESS_COST (p->exp) == best_addr_cost
2733 && (p->cost + 1) >> 1 > best_rtx_cost)))
2734 {
2735 found_better = 1;
2736 best_addr_cost = ADDRESS_COST (p->exp);
2737 best_rtx_cost = (p->cost + 1) >> 1;
2738 best_elt = p;
2739 }
2740
2741 if (found_better)
2742 {
2743 if (validate_change (insn, loc,
2744 canon_reg (copy_rtx (best_elt->exp),
2745 NULL_RTX), 0))
2746 return;
2747 else
2748 best_elt->flag = 1;
2749 }
2750 }
2751 }
2752
2753 /* If the address is a binary operation with the first operand a register
2754 and the second a constant, do the same as above, but looking for
2755 equivalences of the register. Then try to simplify before checking for
2756 the best address to use. This catches a few cases: First is when we
2757 have REG+const and the register is another REG+const. We can often merge
2758 the constants and eliminate one insn and one register. It may also be
2759 that a machine has a cheap REG+REG+const. Finally, this improves the
2760 code on the Alpha for unaligned byte stores. */
2761
2762 if (flag_expensive_optimizations
2763 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2764 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2765 && GET_CODE (XEXP (*loc, 0)) == REG
2766 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2767 {
2768 rtx c = XEXP (*loc, 1);
2769
2770 do_not_record = 0;
2771 hash = HASH (XEXP (*loc, 0), Pmode);
2772 do_not_record = save_do_not_record;
2773 hash_arg_in_memory = save_hash_arg_in_memory;
2774 hash_arg_in_struct = save_hash_arg_in_struct;
2775
2776 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2777 if (elt == 0)
2778 return;
2779
2780 /* We need to find the best (under the criteria documented above) entry
2781 in the class that is valid. We use the `flag' field to indicate
2782 choices that were invalid and iterate until we can't find a better
2783 one that hasn't already been tried. */
2784
2785 for (p = elt->first_same_value; p; p = p->next_same_value)
2786 p->flag = 0;
2787
2788 while (found_better)
2789 {
2790 int best_addr_cost = ADDRESS_COST (*loc);
2791 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2792 struct table_elt *best_elt = elt;
2793 rtx best_rtx = *loc;
2794 int count;
2795
2796 /* This is at worst an O(n^2) algorithm, so limit our search
2797 to the first 32 elements on the list. This avoids trouble
2798 compiling code with very long basic blocks that can easily
2799 call cse_gen_binary so many times that we run out of memory. */
2800
2801 found_better = 0;
2802 for (p = elt->first_same_value, count = 0;
2803 p && count < 32;
2804 p = p->next_same_value, count++)
2805 if (! p->flag
2806 && (GET_CODE (p->exp) == REG
2807 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2808 {
2809 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2810
2811 if ((ADDRESS_COST (new) < best_addr_cost
2812 || (ADDRESS_COST (new) == best_addr_cost
2813 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2814 {
2815 found_better = 1;
2816 best_addr_cost = ADDRESS_COST (new);
2817 best_rtx_cost = (COST (new) + 1) >> 1;
2818 best_elt = p;
2819 best_rtx = new;
2820 }
2821 }
2822
2823 if (found_better)
2824 {
2825 if (validate_change (insn, loc,
2826 canon_reg (copy_rtx (best_rtx),
2827 NULL_RTX), 0))
2828 return;
2829 else
2830 best_elt->flag = 1;
2831 }
2832 }
2833 }
2834 #endif
2835 }
2836 \f
2837 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2838 operation (EQ, NE, GT, etc.), follow it back through the hash table and
2839 find what values are being compared.
2840
2841 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2842 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2843 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2844 compared to produce cc0.
2845
2846 The return value is the comparison operator: either CODE itself,
2847 or the code corresponding to the inverse of the comparison. */
2848
2849 static enum rtx_code
2850 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2851 enum rtx_code code;
2852 rtx *parg1, *parg2;
2853 enum machine_mode *pmode1, *pmode2;
2854 {
2855 rtx arg1, arg2;
2856
2857 arg1 = *parg1, arg2 = *parg2;
2858
2859 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2860
2861 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2862 {
2863 /* Set non-zero when we find something of interest. */
2864 rtx x = 0;
2865 int reverse_code = 0;
2866 struct table_elt *p = 0;
2867
2868 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2869 On machines with CC0, this is the only case that can occur, since
2870 fold_rtx will return the COMPARE or item being compared with zero
2871 when given CC0. */
2872
2873 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2874 x = arg1;
2875
2876 /* If ARG1 is a comparison operator and CODE is testing for
2877 STORE_FLAG_VALUE, get the inner arguments. */
2878
2879 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2880 {
2881 if (code == NE
2882 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2883 && code == LT && STORE_FLAG_VALUE == -1)
2884 #ifdef FLOAT_STORE_FLAG_VALUE
2885 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2886 && FLOAT_STORE_FLAG_VALUE < 0)
2887 #endif
2888 )
2889 x = arg1;
2890 else if (code == EQ
2891 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2892 && code == GE && STORE_FLAG_VALUE == -1)
2893 #ifdef FLOAT_STORE_FLAG_VALUE
2894 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2895 && FLOAT_STORE_FLAG_VALUE < 0)
2896 #endif
2897 )
2898 x = arg1, reverse_code = 1;
2899 }
2900
2901 /* ??? We could also check for
2902
2903 (ne (and (eq (...) (const_int 1))) (const_int 0))
2904
2905 and related forms, but let's wait until we see them occurring. */
2906
2907 if (x == 0)
2908 /* Look up ARG1 in the hash table and see if it has an equivalence
2909 that lets us see what is being compared. */
2910 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2911 GET_MODE (arg1));
2912 if (p) p = p->first_same_value;
2913
2914 for (; p; p = p->next_same_value)
2915 {
2916 enum machine_mode inner_mode = GET_MODE (p->exp);
2917
2918 /* If the entry isn't valid, skip it. */
2919 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2920 continue;
2921
2922 if (GET_CODE (p->exp) == COMPARE
2923 /* Another possibility is that this machine has a compare insn
2924 that includes the comparison code. In that case, ARG1 would
2925 be equivalent to a comparison operation that would set ARG1 to
2926 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2927 ORIG_CODE is the actual comparison being done; if it is an EQ,
2928 we must reverse ORIG_CODE. On machines with a negative value
2929 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2930 || ((code == NE
2931 || (code == LT
2932 && GET_MODE_CLASS (inner_mode) == MODE_INT
2933 && (GET_MODE_BITSIZE (inner_mode)
2934 <= HOST_BITS_PER_WIDE_INT)
2935 && (STORE_FLAG_VALUE
2936 & ((HOST_WIDE_INT) 1
2937 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2938 #ifdef FLOAT_STORE_FLAG_VALUE
2939 || (code == LT
2940 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2941 && FLOAT_STORE_FLAG_VALUE < 0)
2942 #endif
2943 )
2944 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2945 {
2946 x = p->exp;
2947 break;
2948 }
2949 else if ((code == EQ
2950 || (code == GE
2951 && GET_MODE_CLASS (inner_mode) == MODE_INT
2952 && (GET_MODE_BITSIZE (inner_mode)
2953 <= HOST_BITS_PER_WIDE_INT)
2954 && (STORE_FLAG_VALUE
2955 & ((HOST_WIDE_INT) 1
2956 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2957 #ifdef FLOAT_STORE_FLAG_VALUE
2958 || (code == GE
2959 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2960 && FLOAT_STORE_FLAG_VALUE < 0)
2961 #endif
2962 )
2963 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2964 {
2965 reverse_code = 1;
2966 x = p->exp;
2967 break;
2968 }
2969
2970 /* If this is fp + constant, the equivalent is a better operand since
2971 it may let us predict the value of the comparison. */
2972 else if (NONZERO_BASE_PLUS_P (p->exp))
2973 {
2974 arg1 = p->exp;
2975 continue;
2976 }
2977 }
2978
2979 /* If we didn't find a useful equivalence for ARG1, we are done.
2980 Otherwise, set up for the next iteration. */
2981 if (x == 0)
2982 break;
2983
2984 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
2985 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
2986 code = GET_CODE (x);
2987
2988 if (reverse_code)
2989 code = reverse_condition (code);
2990 }
2991
2992 /* Return our results. Return the modes from before fold_rtx
2993 because fold_rtx might produce const_int, and then it's too late. */
2994 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
2995 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
2996
2997 return code;
2998 }
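
/* Example on a CC0 machine (register and constant invented).  If the
   previous insn set (cc0) from (compare (reg:SI 117) (const_int 10))
   and the jump tests (gt (cc0) (const_int 0)), ARG1 folds to the
   COMPARE, so the loop above rewrites the arguments to

       *PARG1 = (reg:SI 117), *PARG2 = (const_int 10)

   and GT is returned unchanged.  Had the equivalence instead recorded
   an EQ-style flag value, REVERSE_CODE would flip the result.  */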
2999 \f
3000 /* Try to simplify a unary operation CODE whose output mode is to be
3001 MODE with input operand OP whose mode was originally OP_MODE.
3002 Return zero if no simplification can be made. */
3003
3004 rtx
3005 simplify_unary_operation (code, mode, op, op_mode)
3006 enum rtx_code code;
3007 enum machine_mode mode;
3008 rtx op;
3009 enum machine_mode op_mode;
3010 {
3011 register int width = GET_MODE_BITSIZE (mode);
3012
3013 /* The order of these tests is critical so that, for example, we don't
3014 check the wrong mode (input vs. output) for a conversion operation,
3015 such as FIX. At some point, this should be simplified. */
3016
3017 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
3018
3019 if (code == FLOAT && GET_MODE (op) == VOIDmode
3020 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3021 {
3022 HOST_WIDE_INT hv, lv;
3023 REAL_VALUE_TYPE d;
3024
3025 if (GET_CODE (op) == CONST_INT)
3026 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3027 else
3028 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3029
3030 #ifdef REAL_ARITHMETIC
3031 REAL_VALUE_FROM_INT (d, lv, hv, mode);
3032 #else
3033 if (hv < 0)
3034 {
3035 d = (double) (~ hv);
3036 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3037 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3038 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3039 d = (- d - 1.0);
3040 }
3041 else
3042 {
3043 d = (double) hv;
3044 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3045 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3046 d += (double) (unsigned HOST_WIDE_INT) lv;
3047 }
3048 #endif /* REAL_ARITHMETIC */
3049 d = real_value_truncate (mode, d);
3050 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3051 }
3052 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3053 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3054 {
3055 HOST_WIDE_INT hv, lv;
3056 REAL_VALUE_TYPE d;
3057
3058 if (GET_CODE (op) == CONST_INT)
3059 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3060 else
3061 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3062
3063 if (op_mode == VOIDmode)
3064 {
3065 /* We don't know how to interpret negative-looking numbers in
3066 this case, so don't try to fold those. */
3067 if (hv < 0)
3068 return 0;
3069 }
3070 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3071 ;
3072 else
3073 hv = 0, lv &= GET_MODE_MASK (op_mode);
3074
3075 #ifdef REAL_ARITHMETIC
3076 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
3077 #else
3078
3079 d = (double) (unsigned HOST_WIDE_INT) hv;
3080 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3081 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3082 d += (double) (unsigned HOST_WIDE_INT) lv;
3083 #endif /* REAL_ARITHMETIC */
3084 d = real_value_truncate (mode, d);
3085 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3086 }
3087 #endif
3088
3089 if (GET_CODE (op) == CONST_INT
3090 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3091 {
3092 register HOST_WIDE_INT arg0 = INTVAL (op);
3093 register HOST_WIDE_INT val;
3094
3095 switch (code)
3096 {
3097 case NOT:
3098 val = ~ arg0;
3099 break;
3100
3101 case NEG:
3102 val = - arg0;
3103 break;
3104
3105 case ABS:
3106 val = (arg0 >= 0 ? arg0 : - arg0);
3107 break;
3108
3109 case FFS:
3110 /* Don't use ffs here. Instead, get the low-order bit and then its
3111 number. If arg0 is zero, this will return 0, as desired. */
3112 arg0 &= GET_MODE_MASK (mode);
3113 val = exact_log2 (arg0 & (- arg0)) + 1;
3114 break;
3115
3116 case TRUNCATE:
3117 val = arg0;
3118 break;
3119
3120 case ZERO_EXTEND:
3121 if (op_mode == VOIDmode)
3122 op_mode = mode;
3123 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3124 {
3125 /* If we were really extending the mode,
3126 we would have to distinguish between zero-extension
3127 and sign-extension. */
3128 if (width != GET_MODE_BITSIZE (op_mode))
3129 abort ();
3130 val = arg0;
3131 }
3132 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3133 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3134 else
3135 return 0;
3136 break;
3137
3138 case SIGN_EXTEND:
3139 if (op_mode == VOIDmode)
3140 op_mode = mode;
3141 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3142 {
3143 /* If we were really extending the mode,
3144 we would have to distinguish between zero-extension
3145 and sign-extension. */
3146 if (width != GET_MODE_BITSIZE (op_mode))
3147 abort ();
3148 val = arg0;
3149 }
3150 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3151 {
3152 val
3153 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3154 if (val
3155 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3156 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3157 }
3158 else
3159 return 0;
3160 break;
3161
3162 case SQRT:
3163 return 0;
3164
3165 default:
3166 abort ();
3167 }
3168
3169 /* Clear the bits that don't belong in our mode,
3170 unless they and our sign bit are all one.
3171 So we get either a reasonable negative value or a reasonable
3172 unsigned value for this mode. */
3173 if (width < HOST_BITS_PER_WIDE_INT
3174 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3175 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3176 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3177
3178 return GEN_INT (val);
3179 }
3180
3181 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3182 for a DImode operation on a CONST_INT. */
3183 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
3184 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3185 {
3186 HOST_WIDE_INT l1, h1, lv, hv;
3187
3188 if (GET_CODE (op) == CONST_DOUBLE)
3189 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3190 else
3191 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3192
3193 switch (code)
3194 {
3195 case NOT:
3196 lv = ~ l1;
3197 hv = ~ h1;
3198 break;
3199
3200 case NEG:
3201 neg_double (l1, h1, &lv, &hv);
3202 break;
3203
3204 case ABS:
3205 if (h1 < 0)
3206 neg_double (l1, h1, &lv, &hv);
3207 else
3208 lv = l1, hv = h1;
3209 break;
3210
3211 case FFS:
3212 hv = 0;
3213 if (l1 == 0)
3214 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3215 else
3216 lv = exact_log2 (l1 & (-l1)) + 1;
3217 break;
3218
3219 case TRUNCATE:
3220 /* This is just a change-of-mode, so do nothing. */
3221 lv = l1, hv = h1;
3222 break;
3223
3224 case ZERO_EXTEND:
3225 if (op_mode == VOIDmode
3226 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3227 return 0;
3228
3229 hv = 0;
3230 lv = l1 & GET_MODE_MASK (op_mode);
3231 break;
3232
3233 case SIGN_EXTEND:
3234 if (op_mode == VOIDmode
3235 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3236 return 0;
3237 else
3238 {
3239 lv = l1 & GET_MODE_MASK (op_mode);
3240 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3241 && (lv & ((HOST_WIDE_INT) 1
3242 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3243 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3244
3245 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3246 }
3247 break;
3248
3249 case SQRT:
3250 return 0;
3251
3252 default:
3253 return 0;
3254 }
3255
3256 return immed_double_const (lv, hv, mode);
3257 }
3258
3259 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3260 else if (GET_CODE (op) == CONST_DOUBLE
3261 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3262 {
3263 REAL_VALUE_TYPE d;
3264 jmp_buf handler;
3265 rtx x;
3266
3267 if (setjmp (handler))
3268 /* There used to be a warning here, but that is inadvisable.
3269 People may want to cause traps, and the natural way
3270 to do it should not get a warning. */
3271 return 0;
3272
3273 set_float_handler (handler);
3274
3275 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3276
3277 switch (code)
3278 {
3279 case NEG:
3280 d = REAL_VALUE_NEGATE (d);
3281 break;
3282
3283 case ABS:
3284 if (REAL_VALUE_NEGATIVE (d))
3285 d = REAL_VALUE_NEGATE (d);
3286 break;
3287
3288 case FLOAT_TRUNCATE:
3289 d = real_value_truncate (mode, d);
3290 break;
3291
3292 case FLOAT_EXTEND:
3293 /* All this does is change the mode. */
3294 break;
3295
3296 case FIX:
3297 d = REAL_VALUE_RNDZINT (d);
3298 break;
3299
3300 case UNSIGNED_FIX:
3301 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3302 break;
3303
3304 case SQRT:
3305 return 0;
3306
3307 default:
3308 abort ();
3309 }
3310
3311 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3312 set_float_handler (NULL_PTR);
3313 return x;
3314 }
3315
3316 else if (GET_CODE (op) == CONST_DOUBLE
3317 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3318 && GET_MODE_CLASS (mode) == MODE_INT
3319 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3320 {
3321 REAL_VALUE_TYPE d;
3322 jmp_buf handler;
3323 HOST_WIDE_INT val;
3324
3325 if (setjmp (handler))
3326 return 0;
3327
3328 set_float_handler (handler);
3329
3330 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3331
3332 switch (code)
3333 {
3334 case FIX:
3335 val = REAL_VALUE_FIX (d);
3336 break;
3337
3338 case UNSIGNED_FIX:
3339 val = REAL_VALUE_UNSIGNED_FIX (d);
3340 break;
3341
3342 default:
3343 abort ();
3344 }
3345
3346 set_float_handler (NULL_PTR);
3347
3348 /* Clear the bits that don't belong in our mode,
3349 unless they and our sign bit are all one.
3350 So we get either a reasonable negative value or a reasonable
3351 unsigned value for this mode. */
3352 if (width < HOST_BITS_PER_WIDE_INT
3353 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3354 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3355 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3356
3357 /* If this would be an entire word for the target, but is not for
3358 the host, then sign-extend on the host so that the number will look
3359 the same way on the host that it would on the target.
3360
3361 For example, when building a 64 bit alpha hosted 32 bit sparc
3362 targeted compiler, then we want the 32 bit unsigned value -1 to be
3363 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
3364 The latter confuses the sparc backend. */
3365
3366 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
3367 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
3368 val |= ((HOST_WIDE_INT) (-1) << width);
3369
3370 return GEN_INT (val);
3371 }
3372 #endif
3373 /* This was formerly used only for non-IEEE float.
3374 eggert@twinsun.com says it is safe for IEEE also. */
3375 else
3376 {
3377 /* There are some simplifications we can do even if the operands
3378 aren't constant. */
3379 switch (code)
3380 {
3381 case NEG:
3382 case NOT:
3383 /* (not (not X)) == X, similarly for NEG. */
3384 if (GET_CODE (op) == code)
3385 return XEXP (op, 0);
3386 break;
3387
3388 case SIGN_EXTEND:
3389 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3390 becomes just the MINUS if its mode is MODE. This allows
3391 folding switch statements on machines using casesi (such as
3392 the Vax). */
3393 if (GET_CODE (op) == TRUNCATE
3394 && GET_MODE (XEXP (op, 0)) == mode
3395 && GET_CODE (XEXP (op, 0)) == MINUS
3396 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3397 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3398 return XEXP (op, 0);
3399
3400 #ifdef POINTERS_EXTEND_UNSIGNED
3401 if (! POINTERS_EXTEND_UNSIGNED
3402 && mode == Pmode && GET_MODE (op) == ptr_mode
3403 && CONSTANT_P (op))
3404 return convert_memory_address (Pmode, op);
3405 #endif
3406 break;
3407
3408 #ifdef POINTERS_EXTEND_UNSIGNED
3409 case ZERO_EXTEND:
3410 if (POINTERS_EXTEND_UNSIGNED
3411 && mode == Pmode && GET_MODE (op) == ptr_mode
3412 && CONSTANT_P (op))
3413 return convert_memory_address (Pmode, op);
3414 break;
3415 #endif
3416 }
3417
3418 return 0;
3419 }
3420 }
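
/* Two small constant-folding examples (values invented).  With a
   32 bit HOST_WIDE_INT,

       simplify_unary_operation (ZERO_EXTEND, SImode, GEN_INT (-1), QImode)

   masks with ~((HOST_WIDE_INT) (-1) << 8) and yields (const_int 255),
   while

       simplify_unary_operation (SIGN_EXTEND, SImode, GEN_INT (255), QImode)

   sees the QImode sign bit set and yields (const_int -1).  */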
3421 \f
3422 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3423 and OP1. Return 0 if no simplification is possible.
3424
3425 Don't use this for relational operations such as EQ or LT.
3426 Use simplify_relational_operation instead. */
3427
3428 rtx
3429 simplify_binary_operation (code, mode, op0, op1)
3430 enum rtx_code code;
3431 enum machine_mode mode;
3432 rtx op0, op1;
3433 {
3434 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3435 HOST_WIDE_INT val;
3436 int width = GET_MODE_BITSIZE (mode);
3437 rtx tem;
3438
3439 /* Relational operations don't work here. We must know the mode
3440 of the operands in order to do the comparison correctly.
3441 Assuming a full word can give incorrect results.
3442 Consider comparing 128 with -128 in QImode. */
3443
3444 if (GET_RTX_CLASS (code) == '<')
3445 abort ();
3446
3447 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3448 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3449 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3450 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3451 {
3452 REAL_VALUE_TYPE f0, f1, value;
3453 jmp_buf handler;
3454
3455 if (setjmp (handler))
3456 return 0;
3457
3458 set_float_handler (handler);
3459
3460 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3461 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3462 f0 = real_value_truncate (mode, f0);
3463 f1 = real_value_truncate (mode, f1);
3464
3465 #ifdef REAL_ARITHMETIC
3466 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3467 #else
3468 switch (code)
3469 {
3470 case PLUS:
3471 value = f0 + f1;
3472 break;
3473 case MINUS:
3474 value = f0 - f1;
3475 break;
3476 case MULT:
3477 value = f0 * f1;
3478 break;
3479 case DIV:
3480 #ifndef REAL_INFINITY
3481 if (f1 == 0)
3482 return 0;
3483 #endif
3484 value = f0 / f1;
3485 break;
3486 case SMIN:
3487 value = MIN (f0, f1);
3488 break;
3489 case SMAX:
3490 value = MAX (f0, f1);
3491 break;
3492 default:
3493 abort ();
3494 }
3495 #endif
3496
3497 value = real_value_truncate (mode, value);
3498 set_float_handler (NULL_PTR);
3499 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3500 }
3501 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3502
3503 /* We can fold some multi-word operations. */
3504 if (GET_MODE_CLASS (mode) == MODE_INT
3505 && width == HOST_BITS_PER_WIDE_INT * 2
3506 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3507 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3508 {
3509 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3510
3511 if (GET_CODE (op0) == CONST_DOUBLE)
3512 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3513 else
3514 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3515
3516 if (GET_CODE (op1) == CONST_DOUBLE)
3517 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3518 else
3519 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3520
3521 switch (code)
3522 {
3523 case MINUS:
3524 /* A - B == A + (-B). */
3525 neg_double (l2, h2, &lv, &hv);
3526 l2 = lv, h2 = hv;
3527
3528 /* .. fall through ... */
3529
3530 case PLUS:
3531 add_double (l1, h1, l2, h2, &lv, &hv);
3532 break;
3533
3534 case MULT:
3535 mul_double (l1, h1, l2, h2, &lv, &hv);
3536 break;
3537
3538 case DIV: case MOD: case UDIV: case UMOD:
3539 /* We'd need to include tree.h to do this and it doesn't seem worth
3540 it. */
3541 return 0;
3542
3543 case AND:
3544 lv = l1 & l2, hv = h1 & h2;
3545 break;
3546
3547 case IOR:
3548 lv = l1 | l2, hv = h1 | h2;
3549 break;
3550
3551 case XOR:
3552 lv = l1 ^ l2, hv = h1 ^ h2;
3553 break;
3554
3555 case SMIN:
3556 if (h1 < h2
3557 || (h1 == h2
3558 && ((unsigned HOST_WIDE_INT) l1
3559 < (unsigned HOST_WIDE_INT) l2)))
3560 lv = l1, hv = h1;
3561 else
3562 lv = l2, hv = h2;
3563 break;
3564
3565 case SMAX:
3566 if (h1 > h2
3567 || (h1 == h2
3568 && ((unsigned HOST_WIDE_INT) l1
3569 > (unsigned HOST_WIDE_INT) l2)))
3570 lv = l1, hv = h1;
3571 else
3572 lv = l2, hv = h2;
3573 break;
3574
3575 case UMIN:
3576 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3577 || (h1 == h2
3578 && ((unsigned HOST_WIDE_INT) l1
3579 < (unsigned HOST_WIDE_INT) l2)))
3580 lv = l1, hv = h1;
3581 else
3582 lv = l2, hv = h2;
3583 break;
3584
3585 case UMAX:
3586 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3587 || (h1 == h2
3588 && ((unsigned HOST_WIDE_INT) l1
3589 > (unsigned HOST_WIDE_INT) l2)))
3590 lv = l1, hv = h1;
3591 else
3592 lv = l2, hv = h2;
3593 break;
3594
3595 case LSHIFTRT: case ASHIFTRT:
3596 case ASHIFT:
3597 case ROTATE: case ROTATERT:
3598 #ifdef SHIFT_COUNT_TRUNCATED
3599 if (SHIFT_COUNT_TRUNCATED)
3600 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3601 #endif
3602
3603 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3604 return 0;
3605
3606 if (code == LSHIFTRT || code == ASHIFTRT)
3607 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3608 code == ASHIFTRT);
3609 else if (code == ASHIFT)
3610 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3611 else if (code == ROTATE)
3612 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3613 else /* code == ROTATERT */
3614 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3615 break;
3616
3617 default:
3618 return 0;
3619 }
3620
3621 return immed_double_const (lv, hv, mode);
3622 }
3623
3624 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3625 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3626 {
3627 /* Even if we can't compute a constant result,
3628 there are some cases worth simplifying. */
3629
3630 switch (code)
3631 {
3632 case PLUS:
3633 /* In IEEE floating point, x+0 is not the same as x. Similarly
3634 for the other optimizations below. */
3635 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3636 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3637 break;
3638
3639 if (op1 == CONST0_RTX (mode))
3640 return op0;
3641
3642 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3643 if (GET_CODE (op0) == NEG)
3644 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3645 else if (GET_CODE (op1) == NEG)
3646 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3647
3648 /* Handle both-operands-constant cases. We can only add
3649 CONST_INTs to constants since the sum of relocatable symbols
3650 can't be handled by most assemblers. Don't add CONST_INT
3651 to CONST_INT since overflow won't be computed properly if the
3652 result is wider than HOST_BITS_PER_WIDE_INT. */
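/* For example, (plus:SI (symbol_ref:SI "x") (const_int 4)) is folded
   by plus_constant into the canonical
   (const:SI (plus:SI (symbol_ref:SI "x") (const_int 4))).  */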
3653
3654 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3655 && GET_CODE (op1) == CONST_INT)
3656 return plus_constant (op0, INTVAL (op1));
3657 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3658 && GET_CODE (op0) == CONST_INT)
3659 return plus_constant (op1, INTVAL (op0));
3660
3661 /* See if this is something like X * C + X or X + X * C or
3662 if the multiplication is written as a shift. If so, we can
3663 distribute and combine into a new multiply or shift (for
3664 example, X * 2 + X becomes X * 3). But don't make a real
3665 multiply if we didn't have one before. */
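/* For example, (plus (mult X 3) X) becomes (mult X 4), while
   (plus (ashift X 2) X), which would give (mult X 5), is left
   alone because no real MULT was present (HAD_MULT stays zero).  */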
3666
3667 if (! FLOAT_MODE_P (mode))
3668 {
3669 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3670 rtx lhs = op0, rhs = op1;
3671 int had_mult = 0;
3672
3673 if (GET_CODE (lhs) == NEG)
3674 coeff0 = -1, lhs = XEXP (lhs, 0);
3675 else if (GET_CODE (lhs) == MULT
3676 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3677 {
3678 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3679 had_mult = 1;
3680 }
3681 else if (GET_CODE (lhs) == ASHIFT
3682 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3683 && INTVAL (XEXP (lhs, 1)) >= 0
3684 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3685 {
3686 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3687 lhs = XEXP (lhs, 0);
3688 }
3689
3690 if (GET_CODE (rhs) == NEG)
3691 coeff1 = -1, rhs = XEXP (rhs, 0);
3692 else if (GET_CODE (rhs) == MULT
3693 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3694 {
3695 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3696 had_mult = 1;
3697 }
3698 else if (GET_CODE (rhs) == ASHIFT
3699 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3700 && INTVAL (XEXP (rhs, 1)) >= 0
3701 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3702 {
3703 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3704 rhs = XEXP (rhs, 0);
3705 }
3706
3707 if (rtx_equal_p (lhs, rhs))
3708 {
3709 tem = cse_gen_binary (MULT, mode, lhs,
3710 GEN_INT (coeff0 + coeff1));
3711 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3712 }
3713 }
3714
3715 /* If one of the operands is a PLUS or a MINUS, see if we can
3716 simplify this by the associative law.
3717 Don't use the associative law for floating point.
3718 The inaccuracy makes it nonassociative,
3719 and subtle programs can break if operations are associated. */
3720
3721 if (INTEGRAL_MODE_P (mode)
3722 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3723 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3724 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3725 return tem;
3726 break;
3727
3728 case COMPARE:
3729 #ifdef HAVE_cc0
3730 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3731 using cc0, in which case we want to leave it as a COMPARE
3732 so we can distinguish it from a register-register-copy.
3733
3734 In IEEE floating point, x-0 is not the same as x. */
3735
3736 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3737 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3738 && op1 == CONST0_RTX (mode))
3739 return op0;
3740 #else
3741 /* Do nothing here. */
3742 #endif
3743 break;
3744
3745 case MINUS:
3746 /* None of these optimizations can be done for IEEE
3747 floating point. */
3748 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3749 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3750 break;
3751
3752 /* We can't assume x-x is 0 even with non-IEEE floating point,
3753 but since it is zero except in very strange circumstances, we
3754 will treat it as zero with -ffast-math. */
3755 if (rtx_equal_p (op0, op1)
3756 && ! side_effects_p (op0)
3757 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3758 return CONST0_RTX (mode);
3759
3760 /* Change subtraction from zero into negation. */
3761 if (op0 == CONST0_RTX (mode))
3762 return gen_rtx (NEG, mode, op1);
3763
3764 /* (-1 - a) is ~a. */
3765 if (op0 == constm1_rtx)
3766 return gen_rtx (NOT, mode, op1);
3767
3768 /* Subtracting 0 has no effect. */
3769 if (op1 == CONST0_RTX (mode))
3770 return op0;
3771
3772 /* See if this is something like X * C - X or vice versa or
3773 if the multiplication is written as a shift. If so, we can
3774 distribute and make a new multiply, shift, or maybe just
3775 have X (if C is 2 in the example above). But don't make a
3776 real multiply if we didn't have one before. */
3777
3778 if (! FLOAT_MODE_P (mode))
3779 {
3780 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3781 rtx lhs = op0, rhs = op1;
3782 int had_mult = 0;
3783
3784 if (GET_CODE (lhs) == NEG)
3785 coeff0 = -1, lhs = XEXP (lhs, 0);
3786 else if (GET_CODE (lhs) == MULT
3787 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3788 {
3789 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3790 had_mult = 1;
3791 }
3792 else if (GET_CODE (lhs) == ASHIFT
3793 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3794 && INTVAL (XEXP (lhs, 1)) >= 0
3795 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3796 {
3797 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3798 lhs = XEXP (lhs, 0);
3799 }
3800
3801 if (GET_CODE (rhs) == NEG)
3802 coeff1 = -1, rhs = XEXP (rhs, 0);
3803 else if (GET_CODE (rhs) == MULT
3804 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3805 {
3806 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3807 had_mult = 1;
3808 }
3809 else if (GET_CODE (rhs) == ASHIFT
3810 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3811 && INTVAL (XEXP (rhs, 1)) >= 0
3812 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3813 {
3814 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3815 rhs = XEXP (rhs, 0);
3816 }
3817
3818 if (rtx_equal_p (lhs, rhs))
3819 {
3820 tem = cse_gen_binary (MULT, mode, lhs,
3821 GEN_INT (coeff0 - coeff1));
3822 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3823 }
3824 }
3825
3826 /* (a - (-b)) -> (a + b). */
3827 if (GET_CODE (op1) == NEG)
3828 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3829
3830 /* If one of the operands is a PLUS or a MINUS, see if we can
3831 simplify this by the associative law.
3832 Don't use the associative law for floating point.
3833 The inaccuracy makes it nonassociative,
3834 and subtle programs can break if operations are associated. */
3835
3836 if (INTEGRAL_MODE_P (mode)
3837 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3838 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3839 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3840 return tem;
3841
3842 /* Don't let a relocatable value get a negative coeff. */
3843 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3844 return plus_constant (op0, - INTVAL (op1));
3845
3846 /* (x - (x & y)) -> (x & ~y) */
3847 if (GET_CODE (op1) == AND)
3848 {
3849 if (rtx_equal_p (op0, XEXP (op1, 0)))
3850 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3851 if (rtx_equal_p (op0, XEXP (op1, 1)))
3852 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3853 }
3854 break;
3855
3856 case MULT:
3857 if (op1 == constm1_rtx)
3858 {
3859 tem = simplify_unary_operation (NEG, mode, op0, mode);
3860
3861 return tem ? tem : gen_rtx (NEG, mode, op0);
3862 }
3863
3864 /* In IEEE floating point, x*0 is not always 0. */
3865 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3866 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3867 && op1 == CONST0_RTX (mode)
3868 && ! side_effects_p (op0))
3869 return op1;
3870
3871 /* In IEEE floating point, x*1 is not equivalent to x for nans.
3872 However, ANSI says we can drop signals,
3873 so we can do this anyway. */
3874 if (op1 == CONST1_RTX (mode))
3875 return op0;
3876
3877 /* Convert multiply by constant power of two into shift unless
3878 we are still generating RTL. This test is a kludge. */
3879 if (GET_CODE (op1) == CONST_INT
3880 && (val = exact_log2 (INTVAL (op1))) >= 0
3881 /* If the mode is larger than the host word size, and the
3882 uppermost bit is set, then this isn't a power of two due
3883 to implicit sign extension. */
3884 && (width <= HOST_BITS_PER_WIDE_INT
3885 || val != HOST_BITS_PER_WIDE_INT - 1)
3886 && ! rtx_equal_function_value_matters)
3887 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
3888
3889 if (GET_CODE (op1) == CONST_DOUBLE
3890 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3891 {
3892 REAL_VALUE_TYPE d;
3893 jmp_buf handler;
3894 int op1is2, op1ism1;
3895
3896 if (setjmp (handler))
3897 return 0;
3898
3899 set_float_handler (handler);
3900 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3901 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3902 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3903 set_float_handler (NULL_PTR);
3904
3905 /* x*2 is x+x and x*(-1) is -x */
3906 if (op1is2 && GET_MODE (op0) == mode)
3907 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3908
3909 else if (op1ism1 && GET_MODE (op0) == mode)
3910 return gen_rtx (NEG, mode, op0);
3911 }
3912 break;
3913
3914 case IOR:
3915 if (op1 == const0_rtx)
3916 return op0;
3917 if (GET_CODE (op1) == CONST_INT
3918 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3919 return op1;
3920 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3921 return op0;
3922 /* A | (~A) -> -1 */
3923 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3924 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3925 && ! side_effects_p (op0)
3926 && GET_MODE_CLASS (mode) != MODE_CC)
3927 return constm1_rtx;
3928 break;
3929
3930 case XOR:
3931 if (op1 == const0_rtx)
3932 return op0;
3933 if (GET_CODE (op1) == CONST_INT
3934 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3935 return gen_rtx (NOT, mode, op0);
3936 if (op0 == op1 && ! side_effects_p (op0)
3937 && GET_MODE_CLASS (mode) != MODE_CC)
3938 return const0_rtx;
3939 break;
3940
3941 case AND:
3942 if (op1 == const0_rtx && ! side_effects_p (op0))
3943 return const0_rtx;
3944 if (GET_CODE (op1) == CONST_INT
3945 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3946 return op0;
3947 if (op0 == op1 && ! side_effects_p (op0)
3948 && GET_MODE_CLASS (mode) != MODE_CC)
3949 return op0;
3950 /* A & (~A) -> 0 */
3951 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3952 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3953 && ! side_effects_p (op0)
3954 && GET_MODE_CLASS (mode) != MODE_CC)
3955 return const0_rtx;
3956 break;
3957
3958 case UDIV:
3959 /* Convert divide by power of two into shift (divide by 1 handled
3960 below). */
3961 if (GET_CODE (op1) == CONST_INT
3962 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3963 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3964
3965 /* ... fall through ... */
3966
3967 case DIV:
3968 if (op1 == CONST1_RTX (mode))
3969 return op0;
3970
3971 /* In IEEE floating point, 0/x is not always 0. */
3972 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3973 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3974 && op0 == CONST0_RTX (mode)
3975 && ! side_effects_p (op1))
3976 return op0;
3977
3978 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3979 /* Change division by a constant into multiplication. Only do
3980 this with -ffast-math until an expert says it is safe in
3981 general. */
3982 else if (GET_CODE (op1) == CONST_DOUBLE
3983 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
3984 && op1 != CONST0_RTX (mode)
3985 && flag_fast_math)
3986 {
3987 REAL_VALUE_TYPE d;
3988 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3989
3990 if (! REAL_VALUES_EQUAL (d, dconst0))
3991 {
3992 #if defined (REAL_ARITHMETIC)
3993 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
3994 return gen_rtx (MULT, mode, op0,
3995 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
3996 #else
3997 return gen_rtx (MULT, mode, op0,
3998 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
3999 #endif
4000 }
4001 }
4002 #endif
4003 break;
4004
4005 case UMOD:
4006 /* Handle modulus by power of two (mod with 1 handled below). */
4007 if (GET_CODE (op1) == CONST_INT
4008 && exact_log2 (INTVAL (op1)) > 0)
4009 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
4010
4011 /* ... fall through ... */
4012
4013 case MOD:
4014 if ((op0 == const0_rtx || op1 == const1_rtx)
4015 && ! side_effects_p (op0) && ! side_effects_p (op1))
4016 return const0_rtx;
4017 break;
4018
4019 case ROTATERT:
4020 case ROTATE:
4021 /* Rotating ~0 always results in ~0. */
4022 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
4023 && INTVAL (op0) == GET_MODE_MASK (mode)
4024 && ! side_effects_p (op1))
4025 return op0;
4026
4027 /* ... fall through ... */
4028
4029 case ASHIFT:
4030 case ASHIFTRT:
4031 case LSHIFTRT:
4032 if (op1 == const0_rtx)
4033 return op0;
4034 if (op0 == const0_rtx && ! side_effects_p (op1))
4035 return op0;
4036 break;
4037
4038 case SMIN:
4039 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4040 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4041 && ! side_effects_p (op0))
4042 return op1;
4043 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4044 return op0;
4045 break;
4046
4047 case SMAX:
4048 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4049 && (INTVAL (op1)
4050 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4051 && ! side_effects_p (op0))
4052 return op1;
4053 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4054 return op0;
4055 break;
4056
4057 case UMIN:
4058 if (op1 == const0_rtx && ! side_effects_p (op0))
4059 return op1;
4060 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4061 return op0;
4062 break;
4063
4064 case UMAX:
4065 if (op1 == constm1_rtx && ! side_effects_p (op0))
4066 return op1;
4067 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4068 return op0;
4069 break;
4070
4071 default:
4072 abort ();
4073 }
4074
4075 return 0;
4076 }
4077
4078 /* Get the integer argument values in two forms:
4079 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4080
4081 arg0 = INTVAL (op0);
4082 arg1 = INTVAL (op1);
4083
4084 if (width < HOST_BITS_PER_WIDE_INT)
4085 {
4086 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4087 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4088
4089 arg0s = arg0;
4090 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4091 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4092
4093 arg1s = arg1;
4094 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4095 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4096 }
4097 else
4098 {
4099 arg0s = arg0;
4100 arg1s = arg1;
4101 }
4102
4103 /* Compute the value of the arithmetic. */
4104
4105 switch (code)
4106 {
4107 case PLUS:
4108 val = arg0s + arg1s;
4109 break;
4110
4111 case MINUS:
4112 val = arg0s - arg1s;
4113 break;
4114
4115 case MULT:
4116 val = arg0s * arg1s;
4117 break;
4118
4119 case DIV:
4120 if (arg1s == 0)
4121 return 0;
4122 val = arg0s / arg1s;
4123 break;
4124
4125 case MOD:
4126 if (arg1s == 0)
4127 return 0;
4128 val = arg0s % arg1s;
4129 break;
4130
4131 case UDIV:
4132 if (arg1 == 0)
4133 return 0;
4134 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4135 break;
4136
4137 case UMOD:
4138 if (arg1 == 0)
4139 return 0;
4140 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4141 break;
4142
4143 case AND:
4144 val = arg0 & arg1;
4145 break;
4146
4147 case IOR:
4148 val = arg0 | arg1;
4149 break;
4150
4151 case XOR:
4152 val = arg0 ^ arg1;
4153 break;
4154
4155 case LSHIFTRT:
4156 /* If shift count is undefined, don't fold it; let the machine do
4157 what it wants. But truncate it if the machine will do that. */
4158 if (arg1 < 0)
4159 return 0;
4160
4161 #ifdef SHIFT_COUNT_TRUNCATED
4162 if (SHIFT_COUNT_TRUNCATED)
4163 arg1 %= width;
4164 #endif
4165
4166 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4167 break;
4168
4169 case ASHIFT:
4170 if (arg1 < 0)
4171 return 0;
4172
4173 #ifdef SHIFT_COUNT_TRUNCATED
4174 if (SHIFT_COUNT_TRUNCATED)
4175 arg1 %= width;
4176 #endif
4177
4178 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4179 break;
4180
4181 case ASHIFTRT:
4182 if (arg1 < 0)
4183 return 0;
4184
4185 #ifdef SHIFT_COUNT_TRUNCATED
4186 if (SHIFT_COUNT_TRUNCATED)
4187 arg1 %= width;
4188 #endif
4189
4190 val = arg0s >> arg1;
4191
4192 /* The bootstrap compiler may not have sign-extended the right shift.
4193 Manually extend the sign to ensure the bootstrap cc matches gcc. */
4194 if (arg0s < 0 && arg1 > 0)
4195 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4196
4197 break;
4198
4199 case ROTATERT:
4200 if (arg1 < 0)
4201 return 0;
4202
4203 arg1 %= width;
4204 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4205 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4206 break;
4207
4208 case ROTATE:
4209 if (arg1 < 0)
4210 return 0;
4211
4212 arg1 %= width;
4213 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4214 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4215 break;
4216
4217 case COMPARE:
4218 /* Do nothing here. */
4219 return 0;
4220
4221 case SMIN:
4222 val = arg0s <= arg1s ? arg0s : arg1s;
4223 break;
4224
4225 case UMIN:
4226 val = ((unsigned HOST_WIDE_INT) arg0
4227 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4228 break;
4229
4230 case SMAX:
4231 val = arg0s > arg1s ? arg0s : arg1s;
4232 break;
4233
4234 case UMAX:
4235 val = ((unsigned HOST_WIDE_INT) arg0
4236 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4237 break;
4238
4239 default:
4240 abort ();
4241 }
4242
4243 /* Clear the bits that don't belong in our mode, unless they and our sign
4244 bit are all one. So we get either a reasonable negative value or a
4245 reasonable unsigned value for this mode. */
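/* For example, with width == 8 a raw sum of 0x17f is masked down to
   0x7f, while -1 (all bits set, including the whole sign-bit region)
   is left alone as a reasonable negative value.  */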
4246 if (width < HOST_BITS_PER_WIDE_INT
4247 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4248 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4249 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4250
4251 /* If this would be an entire word for the target, but is not for
4252 the host, then sign-extend on the host so that the number will look
4253 the same way on the host that it would on the target.
4254
4255 For example, when building a 32 bit sparc targeted compiler hosted
4256 on a 64 bit alpha, we want the 32 bit unsigned value -1 to be
4257 represented as the 64 bit value -1, and not as 0x00000000ffffffff.
4258 The latter confuses the sparc backend. */
4259
4260 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
4261 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
4262 val |= ((HOST_WIDE_INT) (-1) << width);
4263
4264 return GEN_INT (val);
4265 }
4266 \f
4267 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4268 PLUS or MINUS.
4269
4270 Rather than testing for specific cases, we do this by a brute-force
4271 method: perform all possible simplifications until no more changes
4272 occur, then rebuild the operation. */
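/* For example, (x + 3) - (y + 1) is flattened into the operand vector
   {x, 3, y, 1} with y and 1 flagged as negated; the two constants
   combine into 2, and the survivors are rebuilt with cse_gen_binary
   as ((x - y) + 2).  */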
4273
4274 static rtx
4275 simplify_plus_minus (code, mode, op0, op1)
4276 enum rtx_code code;
4277 enum machine_mode mode;
4278 rtx op0, op1;
4279 {
4280 rtx ops[8];
4281 int negs[8];
4282 rtx result, tem;
4283 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4284 int first = 1, negate = 0, changed;
4285 int i, j;
4286
4287 bzero ((char *) ops, sizeof ops);
4288
4289 /* Set up the two operands and then expand them until nothing has been
4290 changed. If we run out of room in our array, give up; this should
4291 almost never happen. */
4292
4293 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4294
4295 changed = 1;
4296 while (changed)
4297 {
4298 changed = 0;
4299
4300 for (i = 0; i < n_ops; i++)
4301 switch (GET_CODE (ops[i]))
4302 {
4303 case PLUS:
4304 case MINUS:
4305 if (n_ops == 7)
4306 return 0;
4307
4308 ops[n_ops] = XEXP (ops[i], 1);
4309 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4310 ops[i] = XEXP (ops[i], 0);
4311 input_ops++;
4312 changed = 1;
4313 break;
4314
4315 case NEG:
4316 ops[i] = XEXP (ops[i], 0);
4317 negs[i] = ! negs[i];
4318 changed = 1;
4319 break;
4320
4321 case CONST:
4322 ops[i] = XEXP (ops[i], 0);
4323 input_consts++;
4324 changed = 1;
4325 break;
4326
4327 case NOT:
4328 /* ~a -> (-a - 1) */
4329 if (n_ops != 7)
4330 {
4331 ops[n_ops] = constm1_rtx;
4332 negs[n_ops++] = negs[i];
4333 ops[i] = XEXP (ops[i], 0);
4334 negs[i] = ! negs[i];
4335 changed = 1;
4336 }
4337 break;
4338
4339 case CONST_INT:
4340 if (negs[i])
4341 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4342 break;
4343 }
4344 }
4345
4346 /* If we only have two operands, we can't do anything. */
4347 if (n_ops <= 2)
4348 return 0;
4349
4350 /* Now simplify each pair of operands until nothing changes. The first
4351 time through just simplify constants against each other. */
4352
4353 changed = 1;
4354 while (changed)
4355 {
4356 changed = first;
4357
4358 for (i = 0; i < n_ops - 1; i++)
4359 for (j = i + 1; j < n_ops; j++)
4360 if (ops[i] != 0 && ops[j] != 0
4361 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4362 {
4363 rtx lhs = ops[i], rhs = ops[j];
4364 enum rtx_code ncode = PLUS;
4365
4366 if (negs[i] && ! negs[j])
4367 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4368 else if (! negs[i] && negs[j])
4369 ncode = MINUS;
4370
4371 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4372 if (tem)
4373 {
4374 ops[i] = tem, ops[j] = 0;
4375 negs[i] = negs[i] && negs[j];
4376 if (GET_CODE (tem) == NEG)
4377 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4378
4379 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4380 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4381 changed = 1;
4382 }
4383 }
4384
4385 first = 0;
4386 }
4387
4388 /* Pack all the operands to the lower-numbered entries and give up if
4389 we didn't reduce the number of operands we had. Make sure we
4390 count a CONST as two operands. If we have the same number of
4391 operands, but have made more CONSTs than we had, this is also
4392 an improvement, so accept it. */
4393
4394 for (i = 0, j = 0; j < n_ops; j++)
4395 if (ops[j] != 0)
4396 {
4397 ops[i] = ops[j], negs[i++] = negs[j];
4398 if (GET_CODE (ops[j]) == CONST)
4399 n_consts++;
4400 }
4401
4402 if (i + n_consts > input_ops
4403 || (i + n_consts == input_ops && n_consts <= input_consts))
4404 return 0;
4405
4406 n_ops = i;
4407
4408 /* If we have a CONST_INT, put it last. */
4409 for (i = 0; i < n_ops - 1; i++)
4410 if (GET_CODE (ops[i]) == CONST_INT)
4411 {
4412 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4413 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4414 }
4415
4416 /* Put a non-negated operand first. If there aren't any, make all
4417 operands positive and negate the whole thing later. */
4418 for (i = 0; i < n_ops && negs[i]; i++)
4419 ;
4420
4421 if (i == n_ops)
4422 {
4423 for (i = 0; i < n_ops; i++)
4424 negs[i] = 0;
4425 negate = 1;
4426 }
4427 else if (i != 0)
4428 {
4429 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4430 j = negs[0], negs[0] = negs[i], negs[i] = j;
4431 }
4432
4433 /* Now make the result by performing the requested operations. */
4434 result = ops[0];
4435 for (i = 1; i < n_ops; i++)
4436 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4437
4438 return negate ? gen_rtx (NEG, mode, result) : result;
4439 }
4440 \f
4441 /* Make a binary operation by properly ordering the operands and
4442 seeing if the expression folds. */
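/* For example, the commutative (plus (const_int 4) (reg)) is first
   reordered to put the complex operand first, giving
   (plus (reg) (const_int 4)), which is then handled by plus_constant
   below.  */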
4443
4444 static rtx
4445 cse_gen_binary (code, mode, op0, op1)
4446 enum rtx_code code;
4447 enum machine_mode mode;
4448 rtx op0, op1;
4449 {
4450 rtx tem;
4451
4452 /* Put complex operands first and constants second if commutative. */
4453 if (GET_RTX_CLASS (code) == 'c'
4454 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4455 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4456 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4457 || (GET_CODE (op0) == SUBREG
4458 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4459 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4460 tem = op0, op0 = op1, op1 = tem;
4461
4462 /* If this simplifies, do it. */
4463 tem = simplify_binary_operation (code, mode, op0, op1);
4464
4465 if (tem)
4466 return tem;
4467
4468 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4469 just form the operation. */
4470
4471 if (code == PLUS && GET_CODE (op1) == CONST_INT
4472 && GET_MODE (op0) != VOIDmode)
4473 return plus_constant (op0, INTVAL (op1));
4474 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4475 && GET_MODE (op0) != VOIDmode)
4476 return plus_constant (op0, - INTVAL (op1));
4477 else
4478 return gen_rtx (code, mode, op0, op1);
4479 }
4480 \f
4481 /* Like simplify_binary_operation except used for relational operators.
4482 MODE is the mode of the operands, not that of the result. If MODE
4483 is VOIDmode, both operands must also be VOIDmode and we compare the
4484 operands in "infinite precision".
4485
4486 If no simplification is possible, this function returns zero. Otherwise,
4487 it returns either const_true_rtx or const0_rtx. */
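/* For example, (geu X (const_int 0)) always simplifies to
   const_true_rtx, since unsigned values are never negative, while a
   comparison of two unrelated registers yields 0 (no simplification).  */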
4488
4489 rtx
4490 simplify_relational_operation (code, mode, op0, op1)
4491 enum rtx_code code;
4492 enum machine_mode mode;
4493 rtx op0, op1;
4494 {
4495 int equal, op0lt, op0ltu, op1lt, op1ltu;
4496 rtx tem;
4497
4498 /* If op0 is a compare, extract the comparison arguments from it. */
4499 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4500 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4501
4502 /* We can't simplify MODE_CC values since we don't know what the
4503 actual comparison is. */
4504 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4505 #ifdef HAVE_cc0
4506 || op0 == cc0_rtx
4507 #endif
4508 )
4509 return 0;
4510
4511 /* For integer comparisons of A and B maybe we can simplify A - B and can
4512 then simplify a comparison of that with zero. If A and B are both either
4513 a register or a CONST_INT, this can't help; testing for these cases will
4514 prevent infinite recursion here and speed things up.
4515
4516 If CODE is an unsigned comparison, then we can never do this optimization,
4517 because it gives an incorrect result if the subtraction wraps around zero.
4518 ANSI C defines unsigned operations such that they never overflow, and
4519 thus such cases cannot be ignored. */
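/* For example, with 32-bit operands (gtu (const_int -1) (const_int 1))
   is true, since 0xffffffff > 1 unsigned, but the difference -2
   compared signed against zero would wrongly report "less".  */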
4520
4521 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4522 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4523 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4524 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4525 && code != GTU && code != GEU && code != LTU && code != LEU)
4526 return simplify_relational_operation (signed_condition (code),
4527 mode, tem, const0_rtx);
4528
4529 /* For non-IEEE floating-point, if the two operands are equal, we know the
4530 result. */
4531 if (rtx_equal_p (op0, op1)
4532 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4533 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4534 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4535
4536 /* If the operands are floating-point constants, see if we can fold
4537 the result. */
4538 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4539 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4540 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4541 {
4542 REAL_VALUE_TYPE d0, d1;
4543 jmp_buf handler;
4544
4545 if (setjmp (handler))
4546 return 0;
4547
4548 set_float_handler (handler);
4549 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4550 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4551 equal = REAL_VALUES_EQUAL (d0, d1);
4552 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4553 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4554 set_float_handler (NULL_PTR);
4555 }
4556 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4557
4558 /* Otherwise, see if the operands are both integers. */
4559 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4560 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4561 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4562 {
4563 int width = GET_MODE_BITSIZE (mode);
4564 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4565 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4566
4567 /* Get the two words comprising each integer constant. */
4568 if (GET_CODE (op0) == CONST_DOUBLE)
4569 {
4570 l0u = l0s = CONST_DOUBLE_LOW (op0);
4571 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4572 }
4573 else
4574 {
4575 l0u = l0s = INTVAL (op0);
4576 h0u = 0, h0s = l0s < 0 ? -1 : 0;
4577 }
4578
4579 if (GET_CODE (op1) == CONST_DOUBLE)
4580 {
4581 l1u = l1s = CONST_DOUBLE_LOW (op1);
4582 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4583 }
4584 else
4585 {
4586 l1u = l1s = INTVAL (op1);
4587 h1u = 0, h1s = l1s < 0 ? -1 : 0;
4588 }
4589
4590 /* If WIDTH is nonzero and no wider than HOST_BITS_PER_WIDE_INT,
4591 we have to sign- or zero-extend the values. */
4592 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4593 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4594
4595 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4596 {
4597 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4598 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4599
4600 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4601 l0s |= ((HOST_WIDE_INT) (-1) << width);
4602
4603 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4604 l1s |= ((HOST_WIDE_INT) (-1) << width);
4605 }
4606
4607 equal = (h0u == h1u && l0u == l1u);
4608 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4609 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4610 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4611 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4612 }
4613
4614 /* Otherwise, there are some code-specific tests we can make. */
4615 else
4616 {
4617 switch (code)
4618 {
4619 case EQ:
4620 /* References to the frame plus a constant or labels cannot
4621 be zero, but a SYMBOL_REF can due to #pragma weak. */
4622 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4623 || GET_CODE (op0) == LABEL_REF)
4624 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4625 /* On some machines, the ap reg can be 0 sometimes. */
4626 && op0 != arg_pointer_rtx
4627 #endif
4628 )
4629 return const0_rtx;
4630 break;
4631
4632 case NE:
4633 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4634 || GET_CODE (op0) == LABEL_REF)
4635 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4636 && op0 != arg_pointer_rtx
4637 #endif
4638 )
4639 return const_true_rtx;
4640 break;
4641
4642 case GEU:
4643 /* Unsigned values are never negative. */
4644 if (op1 == const0_rtx)
4645 return const_true_rtx;
4646 break;
4647
4648 case LTU:
4649 if (op1 == const0_rtx)
4650 return const0_rtx;
4651 break;
4652
4653 case LEU:
4654 /* Unsigned values are never greater than the largest
4655 unsigned value. */
4656 if (GET_CODE (op1) == CONST_INT
4657 && INTVAL (op1) == GET_MODE_MASK (mode)
4658 && INTEGRAL_MODE_P (mode))
4659 return const_true_rtx;
4660 break;
4661
4662 case GTU:
4663 if (GET_CODE (op1) == CONST_INT
4664 && INTVAL (op1) == GET_MODE_MASK (mode)
4665 && INTEGRAL_MODE_P (mode))
4666 return const0_rtx;
4667 break;
4668 }
4669
4670 return 0;
4671 }
4672
4673 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4674 as appropriate. */
4675 switch (code)
4676 {
4677 case EQ:
4678 return equal ? const_true_rtx : const0_rtx;
4679 case NE:
4680 return ! equal ? const_true_rtx : const0_rtx;
4681 case LT:
4682 return op0lt ? const_true_rtx : const0_rtx;
4683 case GT:
4684 return op1lt ? const_true_rtx : const0_rtx;
4685 case LTU:
4686 return op0ltu ? const_true_rtx : const0_rtx;
4687 case GTU:
4688 return op1ltu ? const_true_rtx : const0_rtx;
4689 case LE:
4690 return equal || op0lt ? const_true_rtx : const0_rtx;
4691 case GE:
4692 return equal || op1lt ? const_true_rtx : const0_rtx;
4693 case LEU:
4694 return equal || op0ltu ? const_true_rtx : const0_rtx;
4695 case GEU:
4696 return equal || op1ltu ? const_true_rtx : const0_rtx;
4697 }
4698
4699 abort ();
4700 }
4701 \f
4702 /* Simplify CODE, an operation with result mode MODE and three operands,
4703 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4704 a constant. Return 0 if no simplification is possible. */
4705
4706 rtx
4707 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4708 enum rtx_code code;
4709 enum machine_mode mode, op0_mode;
4710 rtx op0, op1, op2;
4711 {
4712 int width = GET_MODE_BITSIZE (mode);
4713
4714 /* VOIDmode means "infinite" precision. */
4715 if (width == 0)
4716 width = HOST_BITS_PER_WIDE_INT;
4717
4718 switch (code)
4719 {
4720 case SIGN_EXTRACT:
4721 case ZERO_EXTRACT:
4722 if (GET_CODE (op0) == CONST_INT
4723 && GET_CODE (op1) == CONST_INT
4724 && GET_CODE (op2) == CONST_INT
4725 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4726 && width <= HOST_BITS_PER_WIDE_INT)
4727 {
4728 /* Extracting a bit-field from a constant. */
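/* For example, a ZERO_EXTRACT of 4 bits starting at bit 4 of
   (const_int 0xab) shifts right by 4 and masks, giving (const_int 0xa);
   a SIGN_EXTRACT additionally propagates bit 3, giving (const_int -6).  */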
4729 HOST_WIDE_INT val = INTVAL (op0);
4730
4731 if (BITS_BIG_ENDIAN)
4732 val >>= (GET_MODE_BITSIZE (op0_mode)
4733 - INTVAL (op2) - INTVAL (op1));
4734 else
4735 val >>= INTVAL (op2);
4736
4737 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4738 {
4739 /* First zero-extend. */
4740 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4741 /* If desired, propagate sign bit. */
4742 if (code == SIGN_EXTRACT
4743 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4744 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4745 }
4746
4747 /* Clear the bits that don't belong in our mode,
4748 unless they and our sign bit are all one.
4749 So we get either a reasonable negative value or a reasonable
4750 unsigned value for this mode. */
4751 if (width < HOST_BITS_PER_WIDE_INT
4752 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4753 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4754 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4755
4756 return GEN_INT (val);
4757 }
4758 break;
4759
4760 case IF_THEN_ELSE:
4761 if (GET_CODE (op0) == CONST_INT)
4762 return op0 != const0_rtx ? op1 : op2;
4763 break;
4764
4765 default:
4766 abort ();
4767 }
4768
4769 return 0;
4770 }
4771 \f
4772 /* If X is a nontrivial arithmetic operation on an argument
4773 for which a constant value can be determined, return
4774 the result of operating on that value, as a constant.
4775 Otherwise, return X, possibly with one or more operands
4776 modified by recursive calls to this function.
4777
4778 If X is a register whose contents are known, we do NOT
4779 return those contents here. equiv_constant is called to
4780 perform that task.
4781
4782 INSN is the insn that we may be modifying. If it is 0, make a copy
4783 of X before modifying it. */
4784
4785 static rtx
4786 fold_rtx (x, insn)
4787 rtx x;
4788 rtx insn;
4789 {
4790 register enum rtx_code code;
4791 register enum machine_mode mode;
4792 register char *fmt;
4793 register int i;
4794 rtx new = 0;
4795 int copied = 0;
4796 int must_swap = 0;
4797
4798 /* Folded equivalents of first two operands of X. */
4799 rtx folded_arg0;
4800 rtx folded_arg1;
4801
4802 /* Constant equivalents of first three operands of X;
4803 0 when no such equivalent is known. */
4804 rtx const_arg0;
4805 rtx const_arg1;
4806 rtx const_arg2;
4807
4808 /* The mode of the first operand of X. We need this for sign and zero
4809 extends. */
4810 enum machine_mode mode_arg0;
4811
4812 if (x == 0)
4813 return x;
4814
4815 mode = GET_MODE (x);
4816 code = GET_CODE (x);
4817 switch (code)
4818 {
4819 case CONST:
4820 case CONST_INT:
4821 case CONST_DOUBLE:
4822 case SYMBOL_REF:
4823 case LABEL_REF:
4824 case REG:
4825 /* No use simplifying an EXPR_LIST
4826 since they are used only for lists of args
4827 in a function call's REG_EQUAL note. */
4828 case EXPR_LIST:
4829 return x;
4830
4831 #ifdef HAVE_cc0
4832 case CC0:
4833 return prev_insn_cc0;
4834 #endif
4835
4836 case PC:
4837 /* If the next insn is a CODE_LABEL followed by a jump table,
4838 PC's value is a LABEL_REF pointing to that label. That
4839 lets us fold switch statements on the Vax. */
4840 if (insn && GET_CODE (insn) == JUMP_INSN)
4841 {
4842 rtx next = next_nonnote_insn (insn);
4843
4844 if (next && GET_CODE (next) == CODE_LABEL
4845 && NEXT_INSN (next) != 0
4846 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4847 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4848 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4849 return gen_rtx (LABEL_REF, Pmode, next);
4850 }
4851 break;
4852
4853 case SUBREG:
4854 /* See if we previously assigned a constant value to this SUBREG. */
4855 if ((new = lookup_as_function (x, CONST_INT)) != 0
4856 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4857 return new;
4858
4859 /* If this is a paradoxical SUBREG, we have no idea what value the
4860 extra bits would have. However, if the operand is equivalent
4861 to a SUBREG whose operand is the same as our mode, and all the
4862 modes are within a word, we can just use the inner operand
4863 because these SUBREGs just say how to treat the register.
4864
4865 Similarly if we find an integer constant. */
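/* For example, (subreg:SI (reg:QI 100) 0) is paradoxical: bits 8..31
   are undefined (the register numbers here are illustrative). But if
   reg 100 is known equivalent to (subreg:QI (reg:SI 101) 0), then
   reg 101 itself can be used for the SImode value.  */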
4866
4867 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4868 {
4869 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4870 struct table_elt *elt;
4871
4872 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4873 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4874 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4875 imode)) != 0)
4876 for (elt = elt->first_same_value;
4877 elt; elt = elt->next_same_value)
4878 {
4879 if (CONSTANT_P (elt->exp)
4880 && GET_MODE (elt->exp) == VOIDmode)
4881 return elt->exp;
4882
4883 if (GET_CODE (elt->exp) == SUBREG
4884 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4885 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4886 return copy_rtx (SUBREG_REG (elt->exp));
4887 }
4888
4889 return x;
4890 }
4891
4892 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4893 We might be able to if the SUBREG is extracting a single word in an
4894 integral mode or extracting the low part. */
4895
4896 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4897 const_arg0 = equiv_constant (folded_arg0);
4898 if (const_arg0)
4899 folded_arg0 = const_arg0;
4900
4901 if (folded_arg0 != SUBREG_REG (x))
4902 {
4903 new = 0;
4904
4905 if (GET_MODE_CLASS (mode) == MODE_INT
4906 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4907 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4908 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4909 GET_MODE (SUBREG_REG (x)));
4910 if (new == 0 && subreg_lowpart_p (x))
4911 new = gen_lowpart_if_possible (mode, folded_arg0);
4912 if (new)
4913 return new;
4914 }
4915
4916 /* If this is a narrowing SUBREG and our operand is a REG, see if
4917 we can find an equivalence for REG that is an arithmetic operation
4918 in a wider mode where both operands are paradoxical SUBREGs
4919 from objects of our result mode. In that case, we couldn't report
4920 an equivalent value for that operation, since we don't know what the
4921 extra bits will be. But we can find an equivalence for this SUBREG
4922 by folding that operation in the narrow mode. This allows us to
4923 fold arithmetic in narrow modes when the machine only supports
4924 word-sized arithmetic.
4925
4926 Also look for a case where we have a SUBREG whose operand is the
4927 same as our result. If both modes are smaller than a word, we
4928 are simply interpreting a register in different modes and we
4929 can use the inner value. */
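/* For example, given (subreg:QI (reg:SI 100) 0) where reg 100 is
   known to equal (plus:SI (subreg:SI (reg:QI 101) 0) (const_int 1)),
   the PLUS can be refolded in QImode (given constant equivalents for
   the narrowed operands), since the low byte of a sum depends only on
   the low bytes of the operands; the codes excluded below (division,
   shifts, rotates) lack that property.  */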
4930
4931 if (GET_CODE (folded_arg0) == REG
4932 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4933 && subreg_lowpart_p (x))
4934 {
4935 struct table_elt *elt;
4936
4937 /* We can use HASH here since we know that canon_hash won't be
4938 called. */
4939 elt = lookup (folded_arg0,
4940 HASH (folded_arg0, GET_MODE (folded_arg0)),
4941 GET_MODE (folded_arg0));
4942
4943 if (elt)
4944 elt = elt->first_same_value;
4945
4946 for (; elt; elt = elt->next_same_value)
4947 {
4948 enum rtx_code eltcode = GET_CODE (elt->exp);
4949
4950 /* Just check for unary and binary operations. */
4951 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4952 && GET_CODE (elt->exp) != SIGN_EXTEND
4953 && GET_CODE (elt->exp) != ZERO_EXTEND
4954 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4955 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4956 {
4957 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4958
4959 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4960 op0 = fold_rtx (op0, NULL_RTX);
4961
4962 op0 = equiv_constant (op0);
4963 if (op0)
4964 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4965 op0, mode);
4966 }
4967 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4968 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4969 && eltcode != DIV && eltcode != MOD
4970 && eltcode != UDIV && eltcode != UMOD
4971 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4972 && eltcode != ROTATE && eltcode != ROTATERT
4973 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4974 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
4975 == mode))
4976 || CONSTANT_P (XEXP (elt->exp, 0)))
4977 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
4978 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
4979 == mode))
4980 || CONSTANT_P (XEXP (elt->exp, 1))))
4981 {
4982 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
4983 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
4984
4985 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4986 op0 = fold_rtx (op0, NULL_RTX);
4987
4988 if (op0)
4989 op0 = equiv_constant (op0);
4990
4991 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
4992 op1 = fold_rtx (op1, NULL_RTX);
4993
4994 if (op1)
4995 op1 = equiv_constant (op1);
4996
4997 /* If we are looking for the low SImode part of
4998 (ashift:DI c (const_int 32)), it doesn't work
4999 to compute that in SImode, because a 32-bit shift
5000 in SImode is unpredictable. We know the value is 0. */
5001 if (op0 && op1
5002 && GET_CODE (elt->exp) == ASHIFT
5003 && GET_CODE (op1) == CONST_INT
5004 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
5005 {
5006 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
5007
5008 /* If the count fits in the inner mode's width,
5009 but exceeds the outer mode's width,
5010 the value will get truncated to 0
5011 by the subreg. */
5012 new = const0_rtx;
5013 else
5014 /* If the count exceeds even the inner mode's width,
5015 don't fold this expression. */
5016 new = 0;
5017 }
5018 else if (op0 && op1)
5019 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
5020 op0, op1);
5021 }
5022
5023 else if (GET_CODE (elt->exp) == SUBREG
5024 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5025 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
5026 <= UNITS_PER_WORD)
5027 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5028 new = copy_rtx (SUBREG_REG (elt->exp));
5029
5030 if (new)
5031 return new;
5032 }
5033 }
5034
5035 return x;
5036
5037 case NOT:
5038 case NEG:
5039 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5040 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5041 new = lookup_as_function (XEXP (x, 0), code);
5042 if (new)
5043 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5044 break;
5045
5046 case MEM:
5047 /* If we are not actually processing an insn, don't try to find the
5048 best address. Not only don't we care, but we could modify the
5049 MEM in an invalid way since we have no insn to validate against. */
5050 if (insn != 0)
5051 find_best_addr (insn, &XEXP (x, 0));
5052
5053 {
5054 /* Even if we don't fold in the insn itself,
5055 we can safely do so here, in hopes of getting a constant. */
5056 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5057 rtx base = 0;
5058 HOST_WIDE_INT offset = 0;
5059
5060 if (GET_CODE (addr) == REG
5061 && REGNO_QTY_VALID_P (REGNO (addr))
5062 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
5063 && qty_const[reg_qty[REGNO (addr)]] != 0)
5064 addr = qty_const[reg_qty[REGNO (addr)]];
5065
5066 /* If address is constant, split it into a base and integer offset. */
5067 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5068 base = addr;
5069 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5070 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5071 {
5072 base = XEXP (XEXP (addr, 0), 0);
5073 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5074 }
5075 else if (GET_CODE (addr) == LO_SUM
5076 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5077 base = XEXP (addr, 1);
5078
5079 /* If this is a constant pool reference, we can fold it into its
5080 constant to allow better value tracking. */
5081 if (base && GET_CODE (base) == SYMBOL_REF
5082 && CONSTANT_POOL_ADDRESS_P (base))
5083 {
5084 rtx constant = get_pool_constant (base);
5085 enum machine_mode const_mode = get_pool_mode (base);
5086 rtx new;
5087
5088 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5089 constant_pool_entries_cost = COST (constant);
5090
5091 /* If we are loading the full constant, we have an equivalence. */
5092 if (offset == 0 && mode == const_mode)
5093 return constant;
5094
5095 /* If this actually isn't a constant (weird!), we can't do
5096 anything. Otherwise, handle the two most common cases:
5097 extracting a word from a multi-word constant, and extracting
5098 the low-order bits. Other cases don't seem common enough to
5099 worry about. */
5100 if (! CONSTANT_P (constant))
5101 return x;
5102
5103 if (GET_MODE_CLASS (mode) == MODE_INT
5104 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5105 && offset % UNITS_PER_WORD == 0
5106 && (new = operand_subword (constant,
5107 offset / UNITS_PER_WORD,
5108 0, const_mode)) != 0)
5109 return new;
5110
5111 if (((BYTES_BIG_ENDIAN
5112 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5113 || (! BYTES_BIG_ENDIAN && offset == 0))
5114 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5115 return new;
5116 }
5117
5118 /* If this is a reference to a label at a known position in a jump
5119 table, we also know its value. */
5120 if (base && GET_CODE (base) == LABEL_REF)
5121 {
5122 rtx label = XEXP (base, 0);
5123 rtx table_insn = NEXT_INSN (label);
5124
5125 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5126 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5127 {
5128 rtx table = PATTERN (table_insn);
5129
5130 if (offset >= 0
5131 && (offset / GET_MODE_SIZE (GET_MODE (table))
5132 < XVECLEN (table, 0)))
5133 return XVECEXP (table, 0,
5134 offset / GET_MODE_SIZE (GET_MODE (table)));
5135 }
5136 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5137 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5138 {
5139 rtx table = PATTERN (table_insn);
5140
5141 if (offset >= 0
5142 && (offset / GET_MODE_SIZE (GET_MODE (table))
5143 < XVECLEN (table, 1)))
5144 {
5145 offset /= GET_MODE_SIZE (GET_MODE (table));
5146 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5147 XEXP (table, 0));
5148
5149 if (GET_MODE (table) != Pmode)
5150 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5151
5152 /* Indicate this is a constant. This isn't a
5153 valid form of CONST, but it will only be used
5154 to fold the next insns and then discarded, so
5155 it should be safe. */
5156 return gen_rtx (CONST, GET_MODE (new), new);
5157 }
5158 }
5159 }
5160
5161 return x;
5162 }
5163 }
5164
5165 const_arg0 = 0;
5166 const_arg1 = 0;
5167 const_arg2 = 0;
5168 mode_arg0 = VOIDmode;
5169
5170 /* Try folding our operands.
5171 Then see which ones have constant values known. */
5172
5173 fmt = GET_RTX_FORMAT (code);
5174 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5175 if (fmt[i] == 'e')
5176 {
5177 rtx arg = XEXP (x, i);
5178 rtx folded_arg = arg, const_arg = 0;
5179 enum machine_mode mode_arg = GET_MODE (arg);
5180 rtx cheap_arg, expensive_arg;
5181 rtx replacements[2];
5182 int j;
5183
5184 /* Most arguments are cheap, so handle them specially. */
5185 switch (GET_CODE (arg))
5186 {
5187 case REG:
5188 /* This is the same as calling equiv_constant; it is duplicated
5189 here for speed. */
5190 if (REGNO_QTY_VALID_P (REGNO (arg))
5191 && qty_const[reg_qty[REGNO (arg)]] != 0
5192 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5193 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5194 const_arg
5195 = gen_lowpart_if_possible (GET_MODE (arg),
5196 qty_const[reg_qty[REGNO (arg)]]);
5197 break;
5198
5199 case CONST:
5200 case CONST_INT:
5201 case SYMBOL_REF:
5202 case LABEL_REF:
5203 case CONST_DOUBLE:
5204 const_arg = arg;
5205 break;
5206
5207 #ifdef HAVE_cc0
5208 case CC0:
5209 folded_arg = prev_insn_cc0;
5210 mode_arg = prev_insn_cc0_mode;
5211 const_arg = equiv_constant (folded_arg);
5212 break;
5213 #endif
5214
5215 default:
5216 folded_arg = fold_rtx (arg, insn);
5217 const_arg = equiv_constant (folded_arg);
5218 }
5219
5220 /* For the first three operands, see if the operand
5221 is constant or equivalent to a constant. */
5222 switch (i)
5223 {
5224 case 0:
5225 folded_arg0 = folded_arg;
5226 const_arg0 = const_arg;
5227 mode_arg0 = mode_arg;
5228 break;
5229 case 1:
5230 folded_arg1 = folded_arg;
5231 const_arg1 = const_arg;
5232 break;
5233 case 2:
5234 const_arg2 = const_arg;
5235 break;
5236 }
5237
5238 /* Pick the least expensive of the folded argument and an
5239 equivalent constant argument. */
5240 if (const_arg == 0 || const_arg == folded_arg
5241 || COST (const_arg) > COST (folded_arg))
5242 cheap_arg = folded_arg, expensive_arg = const_arg;
5243 else
5244 cheap_arg = const_arg, expensive_arg = folded_arg;
5245
5246 /* Try to replace the operand with the cheapest of the two
5247 possibilities. If it doesn't work and this is either of the first
5248 two operands of a commutative operation, try swapping them.
5249 If THAT fails, try the more expensive, provided it is cheaper
5250 than what is already there. */
5251
5252 if (cheap_arg == XEXP (x, i))
5253 continue;
5254
5255 if (insn == 0 && ! copied)
5256 {
5257 x = copy_rtx (x);
5258 copied = 1;
5259 }
5260
5261 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5262 for (j = 0;
5263 j < 2 && replacements[j]
5264 && COST (replacements[j]) < COST (XEXP (x, i));
5265 j++)
5266 {
5267 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5268 break;
5269
5270 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5271 {
5272 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5273 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5274
5275 if (apply_change_group ())
5276 {
5277 /* Swap them back to be invalid so that this loop can
5278 continue and flag them to be swapped back later. */
5279 rtx tem;
5280
5281 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5282 XEXP (x, 1) = tem;
5283 must_swap = 1;
5284 break;
5285 }
5286 }
5287 }
5288 }
5289
5290 else if (fmt[i] == 'E')
5291 /* Don't try to fold inside a vector of expressions.
5292 Doing nothing is harmless. */
5293 ;
5294
5295 /* If this is a commutative operation, place a constant integer as the
5296 second operand unless the first operand is also a constant integer.
5297 Otherwise, place any constant second unless the first operand is also a constant. */
5298
5299 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5300 {
5301 if (must_swap || (const_arg0
5302 && (const_arg1 == 0
5303 || (GET_CODE (const_arg0) == CONST_INT
5304 && GET_CODE (const_arg1) != CONST_INT))))
5305 {
5306 register rtx tem = XEXP (x, 0);
5307
5308 if (insn == 0 && ! copied)
5309 {
5310 x = copy_rtx (x);
5311 copied = 1;
5312 }
5313
5314 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5315 validate_change (insn, &XEXP (x, 1), tem, 1);
5316 if (apply_change_group ())
5317 {
5318 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5319 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5320 }
5321 }
5322 }
5323
5324 /* If X is an arithmetic operation, see if we can simplify it. */
5325
5326 switch (GET_RTX_CLASS (code))
5327 {
5328 case '1':
5329 {
5330 int is_const = 0;
5331
5332 /* We can't simplify extension ops unless we know the
5333 original mode. */
5334 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5335 && mode_arg0 == VOIDmode)
5336 break;
5337
5338 /* If we had a CONST, strip it off and put it back later if we
5339 fold. */
5340 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5341 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5342
5343 new = simplify_unary_operation (code, mode,
5344 const_arg0 ? const_arg0 : folded_arg0,
5345 mode_arg0);
5346 if (new != 0 && is_const)
5347 new = gen_rtx (CONST, mode, new);
5348 }
5349 break;
5350
5351 case '<':
5352 /* See what items are actually being compared and set FOLDED_ARG[01]
5353 to those values and CODE to the actual comparison code. If any are
5354 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5355 do anything if both operands are already known to be constant. */
5356
5357 if (const_arg0 == 0 || const_arg1 == 0)
5358 {
5359 struct table_elt *p0, *p1;
5360 rtx true = const_true_rtx, false = const0_rtx;
5361 enum machine_mode mode_arg1;
5362
5363 #ifdef FLOAT_STORE_FLAG_VALUE
5364 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5365 {
5366 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5367 mode);
5368 false = CONST0_RTX (mode);
5369 }
5370 #endif
5371
5372 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5373 &mode_arg0, &mode_arg1);
5374 const_arg0 = equiv_constant (folded_arg0);
5375 const_arg1 = equiv_constant (folded_arg1);
5376
5377 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5378 what kinds of things are being compared, so we can't do
5379 anything with this comparison. */
5380
5381 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5382 break;
5383
5384 /* If we do not now have two constants being compared, see
5385 if we can nevertheless deduce some things about the
5386 comparison. */
5387 if (const_arg0 == 0 || const_arg1 == 0)
5388 {
5389 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
5390 non-explicit constant? These aren't zero, but we
5391 don't know their sign. */
5392 if (const_arg1 == const0_rtx
5393 && (NONZERO_BASE_PLUS_P (folded_arg0)
5394 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5395 come out as 0. */
5396 || GET_CODE (folded_arg0) == SYMBOL_REF
5397 #endif
5398 || GET_CODE (folded_arg0) == LABEL_REF
5399 || GET_CODE (folded_arg0) == CONST))
5400 {
5401 if (code == EQ)
5402 return false;
5403 else if (code == NE)
5404 return true;
5405 }
5406
5407 /* See if the two operands are the same. We don't do this
5408 for IEEE floating-point, since we can't assume x == x
5409 when x might be a NaN. */
5410
5411 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5412 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5413 && (folded_arg0 == folded_arg1
5414 || (GET_CODE (folded_arg0) == REG
5415 && GET_CODE (folded_arg1) == REG
5416 && (reg_qty[REGNO (folded_arg0)]
5417 == reg_qty[REGNO (folded_arg1)]))
5418 || ((p0 = lookup (folded_arg0,
5419 (safe_hash (folded_arg0, mode_arg0)
5420 % NBUCKETS), mode_arg0))
5421 && (p1 = lookup (folded_arg1,
5422 (safe_hash (folded_arg1, mode_arg0)
5423 % NBUCKETS), mode_arg0))
5424 && p0->first_same_value == p1->first_same_value)))
5425 return ((code == EQ || code == LE || code == GE
5426 || code == LEU || code == GEU)
5427 ? true : false);
5428
5429 /* If FOLDED_ARG0 is a register, see if the comparison we are
5430 doing now is either the same as we did before or the reverse
5431 (we only check the reverse if not floating-point). */
5432 else if (GET_CODE (folded_arg0) == REG)
5433 {
5434 int qty = reg_qty[REGNO (folded_arg0)];
5435
5436 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5437 && (comparison_dominates_p (qty_comparison_code[qty], code)
5438 || (comparison_dominates_p (qty_comparison_code[qty],
5439 reverse_condition (code))
5440 && ! FLOAT_MODE_P (mode_arg0)))
5441 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5442 || (const_arg1
5443 && rtx_equal_p (qty_comparison_const[qty],
5444 const_arg1))
5445 || (GET_CODE (folded_arg1) == REG
5446 && (reg_qty[REGNO (folded_arg1)]
5447 == qty_comparison_qty[qty]))))
5448 return (comparison_dominates_p (qty_comparison_code[qty],
5449 code)
5450 ? true : false);
5451 }
5452 }
5453 }
5454
5455 /* If we are comparing against zero, see if the first operand is
5456 equivalent to an IOR with a constant. If so, we may be able to
5457 determine the result of this comparison. */
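	  /* Illustrative sketch (not part of the original comments; the
	     register numbers are invented): if a prior insn did
	     (set (reg 70) (ior (reg 69) (const_int 4))), then (reg 70)
	     can never be zero, so (eq (reg 70) (const_int 0)) folds to
	     false and (ne ...) to true; and if the known-set bit is the
	     sign bit, (lt (reg 70) (const_int 0)) folds to true.  */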
5458
5459 if (const_arg1 == const0_rtx)
5460 {
5461 rtx y = lookup_as_function (folded_arg0, IOR);
5462 rtx inner_const;
5463
5464 if (y != 0
5465 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5466 && GET_CODE (inner_const) == CONST_INT
5467 && INTVAL (inner_const) != 0)
5468 {
5469 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5470 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5471 && (INTVAL (inner_const)
5472 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5473 rtx true = const_true_rtx, false = const0_rtx;
5474
5475 #ifdef FLOAT_STORE_FLAG_VALUE
5476 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5477 {
5478 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5479 mode);
5480 false = CONST0_RTX (mode);
5481 }
5482 #endif
5483
5484 switch (code)
5485 {
5486 case EQ:
5487 return false;
5488 case NE:
5489 return true;
5490 case LT: case LE:
5491 if (has_sign)
5492 return true;
5493 break;
5494 case GT: case GE:
5495 if (has_sign)
5496 return false;
5497 break;
5498 }
5499 }
5500 }
5501
5502 new = simplify_relational_operation (code, mode_arg0,
5503 const_arg0 ? const_arg0 : folded_arg0,
5504 const_arg1 ? const_arg1 : folded_arg1);
5505 #ifdef FLOAT_STORE_FLAG_VALUE
5506 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5507 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5508 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5509 #endif
5510 break;
5511
5512 case '2':
5513 case 'c':
5514 switch (code)
5515 {
5516 case PLUS:
5517 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5518 with that LABEL_REF as its second operand. If so, the result is
5519 the first operand of that MINUS. This handles switches with an
5520 ADDR_DIFF_VEC table. */
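	  /* Hedged example (register and label numbers invented): given
	     (plus (minus (reg 65) (label_ref L42)) (label_ref L42)),
	     the two LABEL_REFs cancel and the fold returns (reg 65).  */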
5521 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5522 {
5523 rtx y
5524 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5525 : lookup_as_function (folded_arg0, MINUS);
5526
5527 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5528 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5529 return XEXP (y, 0);
5530
5531 /* Now try for a CONST of a MINUS like the above. */
5532 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5533 : lookup_as_function (folded_arg0, CONST))) != 0
5534 && GET_CODE (XEXP (y, 0)) == MINUS
5535 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5536 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0))
5537 return XEXP (XEXP (y, 0), 0);
5538 }
5539
5540 /* Likewise if the operands are in the other order. */
5541 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5542 {
5543 rtx y
5544 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5545 : lookup_as_function (folded_arg1, MINUS);
5546
5547 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5548 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5549 return XEXP (y, 0);
5550
5551 /* Now try for a CONST of a MINUS like the above. */
5552 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5553 : lookup_as_function (folded_arg1, CONST))) != 0
5554 && GET_CODE (XEXP (y, 0)) == MINUS
5555 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5556 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0))
5557 return XEXP (XEXP (y, 0), 0);
5558 }
5559
5560 /* If second operand is a register equivalent to a negative
5561 CONST_INT, see if we can find a register equivalent to the
5562 positive constant. Make a MINUS if so. Don't do this for
5563 a negative constant, since we might then alternate between
5564 choosing positive and negative constants. It is more common
5565 for the positive constant to have been in use previously. */
5566 if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
5567 && INTVAL (const_arg1) < 0 && GET_CODE (folded_arg1) == REG)
5568 {
5569 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5570 struct table_elt *p
5571 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5572 mode);
5573
5574 if (p)
5575 for (p = p->first_same_value; p; p = p->next_same_value)
5576 if (GET_CODE (p->exp) == REG)
5577 return cse_gen_binary (MINUS, mode, folded_arg0,
5578 canon_reg (p->exp, NULL_RTX));
5579 }
5580 goto from_plus;
5581
5582 case MINUS:
5583 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5584 If so, produce (PLUS Z C2-C). */
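	  /* Worked instance (illustrative only): with Y known to be
	     (plus (reg 65) (const_int 10)) and C == 3, the MINUS folds
	     to (plus (reg 65) (const_int 7)).  */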
5585 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5586 {
5587 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5588 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5589 return fold_rtx (plus_constant (copy_rtx (y),
5590 -INTVAL (const_arg1)),
5591 NULL_RTX);
5592 }
5593
5594 /* ... fall through ... */
5595
5596 from_plus:
5597 case SMIN: case SMAX: case UMIN: case UMAX:
5598 case IOR: case AND: case XOR:
5599 case MULT: case DIV: case UDIV:
5600 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5601 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5602 is known to be of similar form, we may be able to replace the
5603 operation with a combined operation. This may eliminate the
5604 intermediate operation if every use is simplified in this way.
5605 Note that the similar optimization done by combine.c only works
5606 if the intermediate operation's result has only one reference. */
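	  /* Sketch of the idea (register numbers invented): if (reg 70)
	     was set by (ashift (reg 69) (const_int 2)), then
	     (ashift (reg 70) (const_int 3)) can become
	     (ashift (reg 69) (const_int 5)), bypassing the intermediate
	     register entirely if that was its only use.  */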
5607
5608 if (GET_CODE (folded_arg0) == REG
5609 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5610 {
5611 int is_shift
5612 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5613 rtx y = lookup_as_function (folded_arg0, code);
5614 rtx inner_const;
5615 enum rtx_code associate_code;
5616 rtx new_const;
5617
5618 if (y == 0
5619 || 0 == (inner_const
5620 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5621 || GET_CODE (inner_const) != CONST_INT
5622 /* If we have compiled a statement like
5623 "if (x == (x & mask1))", and now are looking at
5624 "x & mask2", we will have a case where the first operand
5625 of Y is the same as our first operand. Unless we detect
5626 this case, an infinite loop will result. */
5627 || XEXP (y, 0) == folded_arg0)
5628 break;
5629
5630 /* Don't associate these operations if they are a PLUS with the
5631 same constant and it is a power of two. These might be doable
5632 with a pre- or post-increment. Similarly for two subtracts of
5633 identical powers of two with post decrement. */
5634
5635 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5636 && (0
5637 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5638 || exact_log2 (INTVAL (const_arg1)) >= 0
5639 #endif
5640 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5641 || exact_log2 (- INTVAL (const_arg1)) >= 0
5642 #endif
5643 ))
5644 break;
5645
5646 /* Compute the code used to compose the constants. For example,
5647 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5648
5649 associate_code
5650 = (code == MULT || code == DIV || code == UDIV ? MULT
5651 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5652
5653 new_const = simplify_binary_operation (associate_code, mode,
5654 const_arg1, inner_const);
5655
5656 if (new_const == 0)
5657 break;
5658
5659 /* If we are associating shift operations, don't let this
5660 produce a shift of the size of the object or larger.
5661 This could occur when we follow a sign-extend by a right
5662 shift on a machine that does a sign-extend as a pair
5663 of shifts. */
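	      /* Hedged illustration (an SImode target is assumed): a byte
	         sign-extend done as (ashiftrt (ashift X 24) 24) followed
	         by a further (ashiftrt ... (const_int 8)) would compose
	         into a shift by 32, the full word size; the code below
	         clamps an ASHIFTRT to bitsize - 1 and gives up on the
	         other shifts.  */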
5664
5665 if (is_shift && GET_CODE (new_const) == CONST_INT
5666 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5667 {
5668 /* As an exception, we can turn an ASHIFTRT of this
5669 form into a shift of the number of bits - 1. */
5670 if (code == ASHIFTRT)
5671 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5672 else
5673 break;
5674 }
5675
5676 y = copy_rtx (XEXP (y, 0));
5677
5678 /* If Y contains our first operand (the most common way this
5679 can happen is if Y is a MEM), we would go into an infinite
5680 loop if we tried to fold it. So don't fold it in that case. */
5681
5682 if (! reg_mentioned_p (folded_arg0, y))
5683 y = fold_rtx (y, insn);
5684
5685 return cse_gen_binary (code, mode, y, new_const);
5686 }
5687 }
5688
5689 new = simplify_binary_operation (code, mode,
5690 const_arg0 ? const_arg0 : folded_arg0,
5691 const_arg1 ? const_arg1 : folded_arg1);
5692 break;
5693
5694 case 'o':
5695 /* (lo_sum (high X) X) is simply X. */
5696 if (code == LO_SUM && const_arg0 != 0
5697 && GET_CODE (const_arg0) == HIGH
5698 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5699 return const_arg1;
5700 break;
5701
5702 case '3':
5703 case 'b':
5704 new = simplify_ternary_operation (code, mode, mode_arg0,
5705 const_arg0 ? const_arg0 : folded_arg0,
5706 const_arg1 ? const_arg1 : folded_arg1,
5707 const_arg2 ? const_arg2 : XEXP (x, 2));
5708 break;
5709 }
5710
5711 return new ? new : x;
5712 }
5713 \f
5714 /* Return a constant value currently equivalent to X.
5715 Return 0 if we don't know one. */
5716
5717 static rtx
5718 equiv_constant (x)
5719 rtx x;
5720 {
5721 if (GET_CODE (x) == REG
5722 && REGNO_QTY_VALID_P (REGNO (x))
5723 && qty_const[reg_qty[REGNO (x)]])
5724 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5725
5726 if (x != 0 && CONSTANT_P (x))
5727 return x;
5728
5729 /* If X is a MEM, try to fold it outside the context of any insn to see if
5730 it might be equivalent to a constant. That handles the case where it
5731 is a constant-pool reference. Then try to look it up in the hash table
5732 in case it is something whose value we have seen before. */
5733
5734 if (GET_CODE (x) == MEM)
5735 {
5736 struct table_elt *elt;
5737
5738 x = fold_rtx (x, NULL_RTX);
5739 if (CONSTANT_P (x))
5740 return x;
5741
5742 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5743 if (elt == 0)
5744 return 0;
5745
5746 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5747 if (elt->is_const && CONSTANT_P (elt->exp))
5748 return elt->exp;
5749 }
5750
5751 return 0;
5752 }
5753 \f
5754 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5755 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5756 least-significant part of X.
5757 MODE specifies how big a part of X to return.
5758
5759 If the requested operation cannot be done, 0 is returned.
5760
5761 This is similar to gen_lowpart in emit-rtl.c. */
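/* A hedged usage sketch (modes chosen arbitrarily): given an SImode
   MEM, asking for its QImode low part yields a QImode MEM whose
   address is offset so that it covers the low-order byte; on a
   big-endian target that means adding to the address, on a
   little-endian target the address is unchanged.  If the adjusted
   address is not valid, 0 is returned instead.  */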
5762
5763 rtx
5764 gen_lowpart_if_possible (mode, x)
5765 enum machine_mode mode;
5766 register rtx x;
5767 {
5768 rtx result = gen_lowpart_common (mode, x);
5769
5770 if (result)
5771 return result;
5772 else if (GET_CODE (x) == MEM)
5773 {
5774 /* This is the only other case we handle. */
5775 register int offset = 0;
5776 rtx new;
5777
5778 if (WORDS_BIG_ENDIAN)
5779 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5780 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5781 if (BYTES_BIG_ENDIAN)
5782 /* Adjust the address so that the address-after-the-data is
5783 unchanged. */
5784 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5785 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
5786 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5787 if (! memory_address_p (mode, XEXP (new, 0)))
5788 return 0;
5789 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5790 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5791 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5792 return new;
5793 }
5794 else
5795 return 0;
5796 }
5797 \f
5798 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5799 branch. It will be zero if not.
5800
5801 In certain cases, this can cause us to add an equivalence. For example,
5802 if we are following the taken case of
5803 if (i == 2)
5804 we can add the fact that `i' and `2' are now equivalent.
5805
5806 In any case, we can record that this comparison was passed. If the same
5807 comparison is seen later, we will know its value. */
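/* Illustrative shape of the insns involved (register and label
   numbers invented): following the taken arm of
     (set (pc) (if_then_else (eq (reg 65) (const_int 2))
                             (label_ref L10) (pc)))
   lets us record that (reg 65) and (const_int 2) are equivalent
   in the code reached at L10.  */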
5808
5809 static void
5810 record_jump_equiv (insn, taken)
5811 rtx insn;
5812 int taken;
5813 {
5814 int cond_known_true;
5815 rtx op0, op1;
5816 enum machine_mode mode, mode0, mode1;
5817 int reversed_nonequality = 0;
5818 enum rtx_code code;
5819
5820 /* Ensure this is the right kind of insn. */
5821 if (! condjump_p (insn) || simplejump_p (insn))
5822 return;
5823
5824 /* See if this jump condition is known true or false. */
5825 if (taken)
5826 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5827 else
5828 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
5829
5830 /* Get the type of comparison being done and the operands being compared.
5831 If we had to reverse a non-equality condition, record that fact so we
5832 know that it isn't valid for floating-point. */
5833 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5834 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5835 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5836
5837 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5838 if (! cond_known_true)
5839 {
5840 reversed_nonequality = (code != EQ && code != NE);
5841 code = reverse_condition (code);
5842 }
5843
5844 /* The mode is the mode of the non-constant. */
5845 mode = mode0;
5846 if (mode1 != VOIDmode)
5847 mode = mode1;
5848
5849 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5850 }
5851
5852 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5853 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5854 Make any useful entries we can with that information. Called from
5855 above function and called recursively. */
5856
5857 static void
5858 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5859 enum rtx_code code;
5860 enum machine_mode mode;
5861 rtx op0, op1;
5862 int reversed_nonequality;
5863 {
5864 unsigned op0_hash, op1_hash;
5865 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5866 struct table_elt *op0_elt, *op1_elt;
5867
5868 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5869 we know that they are also equal in the smaller mode (this is also
5870 true for all smaller modes whether or not there is a SUBREG, but
5871 it is not worth testing for when there is no SUBREG). */
5872
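  /* A sketch under invented modes and register numbers: if
     (subreg:DI (reg:SI 65) 0) is known equal to (reg:DI 66), then
     (reg:SI 65) is also equal to the SImode low part of (reg:DI 66);
     the recursive call below records exactly that.  */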
5873 /* Note that GET_MODE (op0) may not equal MODE. */
5874 if (code == EQ && GET_CODE (op0) == SUBREG
5875 && (GET_MODE_SIZE (GET_MODE (op0))
5876 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5877 {
5878 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5879 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5880
5881 record_jump_cond (code, mode, SUBREG_REG (op0),
5882 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5883 reversed_nonequality);
5884 }
5885
5886 if (code == EQ && GET_CODE (op1) == SUBREG
5887 && (GET_MODE_SIZE (GET_MODE (op1))
5888 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5889 {
5890 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5891 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5892
5893 record_jump_cond (code, mode, SUBREG_REG (op1),
5894 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5895 reversed_nonequality);
5896 }
5897
5898 /* Similarly, if this is an NE comparison, and either is a SUBREG
5899 making a smaller mode, we know the whole thing is also NE. */
5900
5901 /* Note that GET_MODE (op0) may not equal MODE;
5902 if we test MODE instead, we can get an infinite recursion
5903 alternating between two modes each wider than MODE. */
5904
5905 if (code == NE && GET_CODE (op0) == SUBREG
5906 && subreg_lowpart_p (op0)
5907 && (GET_MODE_SIZE (GET_MODE (op0))
5908 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5909 {
5910 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5911 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5912
5913 record_jump_cond (code, mode, SUBREG_REG (op0),
5914 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5915 reversed_nonequality);
5916 }
5917
5918 if (code == NE && GET_CODE (op1) == SUBREG
5919 && subreg_lowpart_p (op1)
5920 && (GET_MODE_SIZE (GET_MODE (op1))
5921 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5922 {
5923 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5924 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5925
5926 record_jump_cond (code, mode, SUBREG_REG (op1),
5927 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5928 reversed_nonequality);
5929 }
5930
5931 /* Hash both operands. */
5932
5933 do_not_record = 0;
5934 hash_arg_in_memory = 0;
5935 hash_arg_in_struct = 0;
5936 op0_hash = HASH (op0, mode);
5937 op0_in_memory = hash_arg_in_memory;
5938 op0_in_struct = hash_arg_in_struct;
5939
5940 if (do_not_record)
5941 return;
5942
5943 do_not_record = 0;
5944 hash_arg_in_memory = 0;
5945 hash_arg_in_struct = 0;
5946 op1_hash = HASH (op1, mode);
5947 op1_in_memory = hash_arg_in_memory;
5948 op1_in_struct = hash_arg_in_struct;
5949
5950 if (do_not_record)
5951 return;
5952
5953 /* Look up both operands. */
5954 op0_elt = lookup (op0, op0_hash, mode);
5955 op1_elt = lookup (op1, op1_hash, mode);
5956
5957 /* If both operands are already equivalent or if they are not in the
5958 table but are identical, do nothing. */
5959 if ((op0_elt != 0 && op1_elt != 0
5960 && op0_elt->first_same_value == op1_elt->first_same_value)
5961 || op0 == op1 || rtx_equal_p (op0, op1))
5962 return;
5963
5964 /* If we aren't setting two things equal, all we can do is save this
5965 comparison. Similarly if this is floating-point. In the latter
5966 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
5967 If we record the equality, we might inadvertently delete code
5968 whose intent was to change -0 to +0. */
5969
5970 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
5971 {
5972 /* If we reversed a floating-point comparison, if OP0 is not a
5973 register, or if OP1 is neither a register nor a constant, we can't
5974 do anything. */
5975
5976 if (GET_CODE (op1) != REG)
5977 op1 = equiv_constant (op1);
5978
5979 if ((reversed_nonequality && FLOAT_MODE_P (mode))
5980 || GET_CODE (op0) != REG || op1 == 0)
5981 return;
5982
5983 /* Put OP0 in the hash table if it isn't already. This gives it a
5984 new quantity number. */
5985 if (op0_elt == 0)
5986 {
5987 if (insert_regs (op0, NULL_PTR, 0))
5988 {
5989 rehash_using_reg (op0);
5990 op0_hash = HASH (op0, mode);
5991
5992 /* If OP0 is contained in OP1, this changes its hash code
5993 as well. Faster to rehash than to check, except
5994 for the simple case of a constant. */
5995 if (! CONSTANT_P (op1))
5996 op1_hash = HASH (op1, mode);
5997 }
5998
5999 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6000 op0_elt->in_memory = op0_in_memory;
6001 op0_elt->in_struct = op0_in_struct;
6002 }
6003
6004 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
6005 if (GET_CODE (op1) == REG)
6006 {
6007 /* Look it up again--in case op0 and op1 are the same. */
6008 op1_elt = lookup (op1, op1_hash, mode);
6009
6010 /* Put OP1 in the hash table so it gets a new quantity number. */
6011 if (op1_elt == 0)
6012 {
6013 if (insert_regs (op1, NULL_PTR, 0))
6014 {
6015 rehash_using_reg (op1);
6016 op1_hash = HASH (op1, mode);
6017 }
6018
6019 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6020 op1_elt->in_memory = op1_in_memory;
6021 op1_elt->in_struct = op1_in_struct;
6022 }
6023
6024 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
6025 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
6026 }
6027 else
6028 {
6029 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
6030 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
6031 }
6032
6033 return;
6034 }
6035
6036 /* If either side is still missing an equivalence, make it now,
6037 then merge the equivalences. */
6038
6039 if (op0_elt == 0)
6040 {
6041 if (insert_regs (op0, NULL_PTR, 0))
6042 {
6043 rehash_using_reg (op0);
6044 op0_hash = HASH (op0, mode);
6045 }
6046
6047 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6048 op0_elt->in_memory = op0_in_memory;
6049 op0_elt->in_struct = op0_in_struct;
6050 }
6051
6052 if (op1_elt == 0)
6053 {
6054 if (insert_regs (op1, NULL_PTR, 0))
6055 {
6056 rehash_using_reg (op1);
6057 op1_hash = HASH (op1, mode);
6058 }
6059
6060 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6061 op1_elt->in_memory = op1_in_memory;
6062 op1_elt->in_struct = op1_in_struct;
6063 }
6064
6065 merge_equiv_classes (op0_elt, op1_elt);
6066 last_jump_equiv_class = op0_elt;
6067 }
6068 \f
6069 /* CSE processing for one instruction.
6070 First simplify sources and addresses of all assignments
6071 in the instruction, using previously-computed equivalent values.
6072 Then install the new sources and destinations in the table
6073 of available values.
6074
6075 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
6076 the insn. */
6077
6078 /* Data on one SET contained in the instruction. */
6079
6080 struct set
6081 {
6082 /* The SET rtx itself. */
6083 rtx rtl;
6084 /* The SET_SRC of the rtx (the original value, if it is changing). */
6085 rtx src;
6086 /* The hash-table element for the SET_SRC of the SET. */
6087 struct table_elt *src_elt;
6088 /* Hash value for the SET_SRC. */
6089 unsigned src_hash;
6090 /* Hash value for the SET_DEST. */
6091 unsigned dest_hash;
6092 /* The SET_DEST, with SUBREG, etc., stripped. */
6093 rtx inner_dest;
6094 /* Place where the pointer to the INNER_DEST was found. */
6095 rtx *inner_dest_loc;
6096 /* Nonzero if the SET_SRC is in memory. */
6097 char src_in_memory;
6098 /* Nonzero if the SET_SRC is in a structure. */
6099 char src_in_struct;
6100 /* Nonzero if the SET_SRC contains something
6101 whose value cannot be predicted and understood. */
6102 char src_volatile;
6103 /* Original machine mode, in case it becomes a CONST_INT. */
6104 enum machine_mode mode;
6105 /* A constant equivalent for SET_SRC, if any. */
6106 rtx src_const;
6107 /* Hash value of constant equivalent for SET_SRC. */
6108 unsigned src_const_hash;
6109 /* Table entry for constant equivalent for SET_SRC, if any. */
6110 struct table_elt *src_const_elt;
6111 };
6112
6113 static void
6114 cse_insn (insn, in_libcall_block)
6115 rtx insn;
6116 int in_libcall_block;
6117 {
6118 register rtx x = PATTERN (insn);
6119 register int i;
6120 rtx tem;
6121 register int n_sets = 0;
6122
6123 /* Records what this insn does to set CC0. */
6124 rtx this_insn_cc0 = 0;
6125 enum machine_mode this_insn_cc0_mode;
6126 struct write_data writes_memory;
6127 static struct write_data init = {0, 0, 0, 0};
6128
6129 rtx src_eqv = 0;
6130 struct table_elt *src_eqv_elt = 0;
6131 int src_eqv_volatile;
6132 int src_eqv_in_memory;
6133 int src_eqv_in_struct;
6134 unsigned src_eqv_hash;
6135
6136 struct set *sets;
6137
6138 this_insn = insn;
6139 writes_memory = init;
6140
6141 /* Find all the SETs and CLOBBERs in this instruction.
6142 Record all the SETs in the array `set' and count them.
6143 Also determine whether there is a CLOBBER that invalidates
6144 all memory references, or all references at varying addresses. */
6145
6146 if (GET_CODE (insn) == CALL_INSN)
6147 {
6148 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6149 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6150 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6151 }
6152
6153 if (GET_CODE (x) == SET)
6154 {
6155 sets = (struct set *) alloca (sizeof (struct set));
6156 sets[0].rtl = x;
6157
6158 /* Ignore SETs that are unconditional jumps.
6159 They never need cse processing, so this does not hurt.
6160 The reason is not efficiency but rather
6161 so that we can test at the end for instructions
6162 that have been simplified to unconditional jumps
6163 and not be misled by unchanged instructions
6164 that were unconditional jumps to begin with. */
6165 if (SET_DEST (x) == pc_rtx
6166 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6167 ;
6168
6169 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6170 The hard function value register is used only once, to copy to
6171 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6172 Ensure we invalidate the destination register. On the 80386 no
6173 other code would invalidate it since it is a fixed_reg.
6174 We need not check the return of apply_change_group; see canon_reg. */
6175
6176 else if (GET_CODE (SET_SRC (x)) == CALL)
6177 {
6178 canon_reg (SET_SRC (x), insn);
6179 apply_change_group ();
6180 fold_rtx (SET_SRC (x), insn);
6181 invalidate (SET_DEST (x), VOIDmode);
6182 }
6183 else
6184 n_sets = 1;
6185 }
6186 else if (GET_CODE (x) == PARALLEL)
6187 {
6188 register int lim = XVECLEN (x, 0);
6189
6190 sets = (struct set *) alloca (lim * sizeof (struct set));
6191
6192 /* Find all regs explicitly clobbered in this insn,
6193 and ensure they are not replaced with any other regs
6194 elsewhere in this insn.
6195 When a reg that is clobbered is also used for input,
6196 we should presume that that is for a reason,
6197 and we should not substitute some other register
6198 which is not supposed to be clobbered.
6199 Therefore, this loop cannot be merged into the one below
6200 because a CALL may precede a CLOBBER and refer to the
6201 value clobbered. We must not let a canonicalization do
6202 anything in that case. */
6203 for (i = 0; i < lim; i++)
6204 {
6205 register rtx y = XVECEXP (x, 0, i);
6206 if (GET_CODE (y) == CLOBBER)
6207 {
6208 rtx clobbered = XEXP (y, 0);
6209
6210 if (GET_CODE (clobbered) == REG
6211 || GET_CODE (clobbered) == SUBREG)
6212 invalidate (clobbered, VOIDmode);
6213 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6214 || GET_CODE (clobbered) == ZERO_EXTRACT)
6215 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6216 }
6217 }
6218
6219 for (i = 0; i < lim; i++)
6220 {
6221 register rtx y = XVECEXP (x, 0, i);
6222 if (GET_CODE (y) == SET)
6223 {
6224 /* As above, we ignore unconditional jumps and call-insns and
6225 ignore the result of apply_change_group. */
6226 if (GET_CODE (SET_SRC (y)) == CALL)
6227 {
6228 canon_reg (SET_SRC (y), insn);
6229 apply_change_group ();
6230 fold_rtx (SET_SRC (y), insn);
6231 invalidate (SET_DEST (y), VOIDmode);
6232 }
6233 else if (SET_DEST (y) == pc_rtx
6234 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6235 ;
6236 else
6237 sets[n_sets++].rtl = y;
6238 }
6239 else if (GET_CODE (y) == CLOBBER)
6240 {
6241 /* If we clobber memory, take note of that,
6242 and canon the address.
6243 This does nothing when a register is clobbered
6244 because we have already invalidated the reg. */
6245 if (GET_CODE (XEXP (y, 0)) == MEM)
6246 {
6247 canon_reg (XEXP (y, 0), NULL_RTX);
6248 note_mem_written (XEXP (y, 0), &writes_memory);
6249 }
6250 }
6251 else if (GET_CODE (y) == USE
6252 && ! (GET_CODE (XEXP (y, 0)) == REG
6253 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6254 canon_reg (y, NULL_RTX);
6255 else if (GET_CODE (y) == CALL)
6256 {
6257 /* The result of apply_change_group can be ignored; see
6258 canon_reg. */
6259 canon_reg (y, insn);
6260 apply_change_group ();
6261 fold_rtx (y, insn);
6262 }
6263 }
6264 }
6265 else if (GET_CODE (x) == CLOBBER)
6266 {
6267 if (GET_CODE (XEXP (x, 0)) == MEM)
6268 {
6269 canon_reg (XEXP (x, 0), NULL_RTX);
6270 note_mem_written (XEXP (x, 0), &writes_memory);
6271 }
6272 }
6273
6274 /* Canonicalize a USE of a pseudo register or memory location. */
6275 else if (GET_CODE (x) == USE
6276 && ! (GET_CODE (XEXP (x, 0)) == REG
6277 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6278 canon_reg (XEXP (x, 0), NULL_RTX);
6279 else if (GET_CODE (x) == CALL)
6280 {
6281 /* The result of apply_change_group can be ignored; see canon_reg. */
6282 canon_reg (x, insn);
6283 apply_change_group ();
6284 fold_rtx (x, insn);
6285 }
6286
6287 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6288 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6289 is handled specially for this case, and if it isn't set, then there will
6290 be no equivalence for the destination. */
6291 if (n_sets == 1 && REG_NOTES (insn) != 0
6292 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6293 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6294 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6295 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6296
6297 /* Canonicalize sources and addresses of destinations.
6298 We do this in a separate pass to avoid problems when a MATCH_DUP is
6299 present in the insn pattern. In that case, we want to ensure that
6300 we don't break the duplicate nature of the pattern. So we will replace
6301 both operands at the same time. Otherwise, we would fail to find an
6302 equivalent substitution in the loop calling validate_change below.
6303
6304 We used to suppress canonicalization of DEST if it appears in SRC,
6305 but we don't do this any more. */
6306
6307 for (i = 0; i < n_sets; i++)
6308 {
6309 rtx dest = SET_DEST (sets[i].rtl);
6310 rtx src = SET_SRC (sets[i].rtl);
6311 rtx new = canon_reg (src, insn);
6312
6313 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6314 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6315 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6316 || insn_n_dups[recog_memoized (insn)] > 0)
6317 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6318 else
6319 SET_SRC (sets[i].rtl) = new;
6320
6321 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6322 {
6323 validate_change (insn, &XEXP (dest, 1),
6324 canon_reg (XEXP (dest, 1), insn), 1);
6325 validate_change (insn, &XEXP (dest, 2),
6326 canon_reg (XEXP (dest, 2), insn), 1);
6327 }
6328
6329 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6330 || GET_CODE (dest) == ZERO_EXTRACT
6331 || GET_CODE (dest) == SIGN_EXTRACT)
6332 dest = XEXP (dest, 0);
6333
6334 if (GET_CODE (dest) == MEM)
6335 canon_reg (dest, insn);
6336 }
6337
6338 /* Now that we have done all the replacements, we can apply the change
6339 group and see if they all work. Note that this will cause some
6340 canonicalizations that would have worked individually not to be applied
6341 because some other canonicalization didn't work, but this should not
6342 occur often.
6343
6344 The result of apply_change_group can be ignored; see canon_reg. */
6345
6346 apply_change_group ();
6347
6348 /* Set sets[i].src_elt to the class each source belongs to.
6349 Detect assignments from or to volatile things
6350 and set sets[i].rtl to zero so they will be ignored
6351 in the rest of this function.
6352
6353 Nothing in this loop changes the hash table or the register chains. */
6354
6355 for (i = 0; i < n_sets; i++)
6356 {
6357 register rtx src, dest;
6358 register rtx src_folded;
6359 register struct table_elt *elt = 0, *p;
6360 enum machine_mode mode;
6361 rtx src_eqv_here;
6362 rtx src_const = 0;
6363 rtx src_related = 0;
6364 struct table_elt *src_const_elt = 0;
6365 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6366 int src_related_cost = 10000, src_elt_cost = 10000;
6367 /* Set non-zero if we need to call force_const_mem with the
6368 contents of src_folded before using it. */
6369 int src_folded_force_flag = 0;
6370
6371 dest = SET_DEST (sets[i].rtl);
6372 src = SET_SRC (sets[i].rtl);
6373
6374 /* If SRC is a constant that has no machine mode,
6375 hash it with the destination's machine mode.
6376 This way we can keep different modes separate. */
6377
6378 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6379 sets[i].mode = mode;
6380
6381 if (src_eqv)
6382 {
6383 enum machine_mode eqvmode = mode;
6384 if (GET_CODE (dest) == STRICT_LOW_PART)
6385 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6386 do_not_record = 0;
6387 hash_arg_in_memory = 0;
6388 hash_arg_in_struct = 0;
6389 src_eqv = fold_rtx (src_eqv, insn);
6390 src_eqv_hash = HASH (src_eqv, eqvmode);
6391
6392 /* Find the equivalence class for the equivalent expression. */
6393
6394 if (!do_not_record)
6395 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6396
6397 src_eqv_volatile = do_not_record;
6398 src_eqv_in_memory = hash_arg_in_memory;
6399 src_eqv_in_struct = hash_arg_in_struct;
6400 }
6401
6402 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6403 value of the INNER register, not the destination. So it is not
6404 a valid substitution for the source. But save it for later. */
6405 if (GET_CODE (dest) == STRICT_LOW_PART)
6406 src_eqv_here = 0;
6407 else
6408 src_eqv_here = src_eqv;
6409
6410 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6411 simplified result, which may not necessarily be valid. */
6412 src_folded = fold_rtx (src, insn);
6413
6414 #if 0
6415 /* ??? This caused bad code to be generated for the m68k port with -O2.
6416 Suppose src is (CONST_INT -1), and that after truncation src_folded
6417 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6418 At the end we will add src and src_const to the same equivalence
6419 class. We now have 3 and -1 on the same equivalence class. This
6420 causes later instructions to be mis-optimized. */
6421 /* If storing a constant in a bitfield, pre-truncate the constant
6422 so we will be able to record it later. */
6423 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6424 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6425 {
6426 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6427
6428 if (GET_CODE (src) == CONST_INT
6429 && GET_CODE (width) == CONST_INT
6430 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6431 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6432 src_folded
6433 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6434 << INTVAL (width)) - 1));
6435 }
6436 #endif
6437
6438 /* Compute SRC's hash code, and also notice if it
6439 should not be recorded at all. In that case,
6440 prevent any further processing of this assignment. */
6441 do_not_record = 0;
6442 hash_arg_in_memory = 0;
6443 hash_arg_in_struct = 0;
6444
6445 sets[i].src = src;
6446 sets[i].src_hash = HASH (src, mode);
6447 sets[i].src_volatile = do_not_record;
6448 sets[i].src_in_memory = hash_arg_in_memory;
6449 sets[i].src_in_struct = hash_arg_in_struct;
6450
6451 #if 0
6452 /* It is no longer clear why we used to do this, but it doesn't
6453 appear to still be needed. So let's try without it since this
6454 code hurts cse'ing widened ops. */
6455 /* If source is a perverse subreg (such as QI treated as an SI),
6456 treat it as volatile. It may do the work of an SI in one context
6457 where the extra bits are not being used, but cannot replace an SI
6458 in general. */
6459 if (GET_CODE (src) == SUBREG
6460 && (GET_MODE_SIZE (GET_MODE (src))
6461 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6462 sets[i].src_volatile = 1;
6463 #endif
6464
6465 /* Locate all possible equivalent forms for SRC. Try to replace
6466 SRC in the insn with each cheaper equivalent.
6467
6468 We have the following types of equivalents: SRC itself, a folded
6469 version, a value given in a REG_EQUAL note, or a value related
6470 to a constant.
6471
6472 Each of these equivalents may be part of an additional class
6473 of equivalents (if more than one is in the table, they must be in
6474 the same class; we check for this).
6475
6476 If the source is volatile, we don't do any table lookups.
6477
6478 We note any constant equivalent for possible later use in a
6479 REG_NOTE. */
6480
6481 if (!sets[i].src_volatile)
6482 elt = lookup (src, sets[i].src_hash, mode);
6483
6484 sets[i].src_elt = elt;
6485
6486 if (elt && src_eqv_here && src_eqv_elt)
6487 {
6488 if (elt->first_same_value != src_eqv_elt->first_same_value)
6489 {
6490 /* The REG_EQUAL is indicating that two formerly distinct
6491 classes are now equivalent. So merge them. */
6492 merge_equiv_classes (elt, src_eqv_elt);
6493 src_eqv_hash = HASH (src_eqv, elt->mode);
6494 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6495 }
6496
6497 src_eqv_here = 0;
6498 }
6499
6500 else if (src_eqv_elt)
6501 elt = src_eqv_elt;
6502
6503 /* Try to find a constant somewhere and record it in `src_const'.
6504 Record its table element, if any, in `src_const_elt'. Look in
6505 any known equivalences first. (If the constant is not in the
6506 table, also set `sets[i].src_const_hash'). */
6507 if (elt)
6508 for (p = elt->first_same_value; p; p = p->next_same_value)
6509 if (p->is_const)
6510 {
6511 src_const = p->exp;
6512 src_const_elt = elt;
6513 break;
6514 }
6515
6516 if (src_const == 0
6517 && (CONSTANT_P (src_folded)
6518 /* Consider (minus (label_ref L1) (label_ref L2)) as
6519 "constant" here so we will record it. This allows us
6520 to fold switch statements when an ADDR_DIFF_VEC is used. */
6521 || (GET_CODE (src_folded) == MINUS
6522 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6523 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6524 src_const = src_folded, src_const_elt = elt;
6525 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6526 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6527
6528 /* If we don't know if the constant is in the table, get its
6529 hash code and look it up. */
6530 if (src_const && src_const_elt == 0)
6531 {
6532 sets[i].src_const_hash = HASH (src_const, mode);
6533 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6534 }
6535
6536 sets[i].src_const = src_const;
6537 sets[i].src_const_elt = src_const_elt;
6538
6539 /* If the constant and our source are both in the table, mark them as
6540 equivalent. Otherwise, if a constant is in the table but the source
6541 isn't, set ELT to it. */
6542 if (src_const_elt && elt
6543 && src_const_elt->first_same_value != elt->first_same_value)
6544 merge_equiv_classes (elt, src_const_elt);
6545 else if (src_const_elt && elt == 0)
6546 elt = src_const_elt;
6547
6548 /* See if there is a register linearly related to a constant
6549 equivalent of SRC. */
6550 if (src_const
6551 && (GET_CODE (src_const) == CONST
6552 || (src_const_elt && src_const_elt->related_value != 0)))
6553 {
6554 src_related = use_related_value (src_const, src_const_elt);
6555 if (src_related)
6556 {
6557 struct table_elt *src_related_elt
6558 = lookup (src_related, HASH (src_related, mode), mode);
6559 if (src_related_elt && elt)
6560 {
6561 if (elt->first_same_value
6562 != src_related_elt->first_same_value)
6563 /* This can occur when we previously saw a CONST
6564 involving a SYMBOL_REF and then see the SYMBOL_REF
6565 twice. Merge the involved classes. */
6566 merge_equiv_classes (elt, src_related_elt);
6567
6568 src_related = 0;
6569 src_related_elt = 0;
6570 }
6571 else if (src_related_elt && elt == 0)
6572 elt = src_related_elt;
6573 }
6574 }
6575
6576 /* See if we have a CONST_INT that is already in a register in a
6577 wider mode. */
6578
6579 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6580 && GET_MODE_CLASS (mode) == MODE_INT
6581 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6582 {
6583 enum machine_mode wider_mode;
6584
6585 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6586 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6587 && src_related == 0;
6588 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6589 {
6590 struct table_elt *const_elt
6591 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6592
6593 if (const_elt == 0)
6594 continue;
6595
6596 for (const_elt = const_elt->first_same_value;
6597 const_elt; const_elt = const_elt->next_same_value)
6598 if (GET_CODE (const_elt->exp) == REG)
6599 {
6600 src_related = gen_lowpart_if_possible (mode,
6601 const_elt->exp);
6602 break;
6603 }
6604 }
6605 }
6606
6607 /* Another possibility is that we have an AND with a constant in
6608 a mode narrower than a word. If so, it might have been generated
6609 as part of an "if" which would narrow the AND. If we already
6610 have done the AND in a wider mode, we can use a SUBREG of that
6611 value. */
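      /* Hedged example (register numbers invented): for a source
         (and:HI (reg:HI 66) (const_int 255)), if the wider
         (and:SI ... (const_int 255)) on the same value is already
         available in (reg:SI 70), then (subreg:HI (reg:SI 70) 0) is
         an equivalent and the AND need not be redone.  */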
6612
6613 if (flag_expensive_optimizations && ! src_related
6614 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6615 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6616 {
6617 enum machine_mode tmode;
6618 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6619
6620 for (tmode = GET_MODE_WIDER_MODE (mode);
6621 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6622 tmode = GET_MODE_WIDER_MODE (tmode))
6623 {
6624 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6625 struct table_elt *larger_elt;
6626
6627 if (inner)
6628 {
6629 PUT_MODE (new_and, tmode);
6630 XEXP (new_and, 0) = inner;
6631 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6632 if (larger_elt == 0)
6633 continue;
6634
6635 for (larger_elt = larger_elt->first_same_value;
6636 larger_elt; larger_elt = larger_elt->next_same_value)
6637 if (GET_CODE (larger_elt->exp) == REG)
6638 {
6639 src_related
6640 = gen_lowpart_if_possible (mode, larger_elt->exp);
6641 break;
6642 }
6643
6644 if (src_related)
6645 break;
6646 }
6647 }
6648 }
6649
6650 #ifdef LOAD_EXTEND_OP
6651 /* See if a MEM has already been loaded with a widening operation;
6652 if it has, we can use a subreg of that. Many CISC machines
6653 also have such operations, but this is only likely to be
6654 beneficial on these machines. */
6655
6656 if (flag_expensive_optimizations && src_related == 0
6657 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6658 && GET_MODE_CLASS (mode) == MODE_INT
6659 && GET_CODE (src) == MEM && ! do_not_record
6660 && LOAD_EXTEND_OP (mode) != NIL)
6661 {
6662 enum machine_mode tmode;
6663
6664 /* Set what we are trying to extend and the operation it might
6665 have been extended with. */
6666 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6667 XEXP (memory_extend_rtx, 0) = src;
6668
6669 for (tmode = GET_MODE_WIDER_MODE (mode);
6670 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6671 tmode = GET_MODE_WIDER_MODE (tmode))
6672 {
6673 struct table_elt *larger_elt;
6674
6675 PUT_MODE (memory_extend_rtx, tmode);
6676 larger_elt = lookup (memory_extend_rtx,
6677 HASH (memory_extend_rtx, tmode), tmode);
6678 if (larger_elt == 0)
6679 continue;
6680
6681 for (larger_elt = larger_elt->first_same_value;
6682 larger_elt; larger_elt = larger_elt->next_same_value)
6683 if (GET_CODE (larger_elt->exp) == REG)
6684 {
6685 src_related = gen_lowpart_if_possible (mode,
6686 larger_elt->exp);
6687 break;
6688 }
6689
6690 if (src_related)
6691 break;
6692 }
6693 }
6694 #endif /* LOAD_EXTEND_OP */
6695
6696 if (src == src_folded)
6697 src_folded = 0;
6698
6699 /* At this point, ELT, if non-zero, points to a class of expressions
6700 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6701 and SRC_RELATED, if non-zero, each contain additional equivalent
6702 expressions. Prune these latter expressions by deleting expressions
6703 already in the equivalence class.
6704
6705 Check for an equivalent identical to the destination. If found,
6706 this is the preferred equivalent since it will likely lead to
6707 elimination of the insn. Indicate this by placing it in
6708 `src_related'. */
6709
6710 if (elt) elt = elt->first_same_value;
6711 for (p = elt; p; p = p->next_same_value)
6712 {
6713 enum rtx_code code = GET_CODE (p->exp);
6714
6715 /* If the expression is not valid, ignore it. Then we do not
6716 have to check for validity below. In most cases, we can use
6717 `rtx_equal_p', since canonicalization has already been done. */
6718 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6719 continue;
6720
6721 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6722 src = 0;
6723 else if (src_folded && GET_CODE (src_folded) == code
6724 && rtx_equal_p (src_folded, p->exp))
6725 src_folded = 0;
6726 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6727 && rtx_equal_p (src_eqv_here, p->exp))
6728 src_eqv_here = 0;
6729 else if (src_related && GET_CODE (src_related) == code
6730 && rtx_equal_p (src_related, p->exp))
6731 src_related = 0;
6732
6733 /* If this is the same as the destination of the insn, we want
6734 to prefer it. Copy it to src_related. The code below will
6735 then give it a negative cost. */
6736 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6737 src_related = dest;
6738
6739 }
6740
6741 /* Find the cheapest valid equivalent, trying all the available
6742 possibilities. Prefer items not in the hash table to ones
6743 that are when they are equal cost. Note that we can never
6744 worsen an insn as the current contents will also succeed.
6745 If we find an equivalent identical to the destination, use it as best,
6746 since this insn will probably be eliminated in that case. */
6747 if (src)
6748 {
6749 if (rtx_equal_p (src, dest))
6750 src_cost = -1;
6751 else
6752 src_cost = COST (src);
6753 }
6754
6755 if (src_eqv_here)
6756 {
6757 if (rtx_equal_p (src_eqv_here, dest))
6758 src_eqv_cost = -1;
6759 else
6760 src_eqv_cost = COST (src_eqv_here);
6761 }
6762
6763 if (src_folded)
6764 {
6765 if (rtx_equal_p (src_folded, dest))
6766 src_folded_cost = -1;
6767 else
6768 src_folded_cost = COST (src_folded);
6769 }
6770
6771 if (src_related)
6772 {
6773 if (rtx_equal_p (src_related, dest))
6774 src_related_cost = -1;
6775 else
6776 src_related_cost = COST (src_related);
6777 }
6778
6779 /* If this was an indirect jump insn, a known label will really be
6780 cheaper even though it looks more expensive. */
6781 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6782 src_folded = src_const, src_folded_cost = -1;
6783
6784 /* Terminate loop when replacement made. This must terminate since
6785 the current contents will be tested and will always be valid. */
6786 while (1)
6787 {
6788 rtx trial;
6789
6790 /* Skip invalid entries. */
6791 while (elt && GET_CODE (elt->exp) != REG
6792 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6793 elt = elt->next_same_value;
6794
6795 if (elt) src_elt_cost = elt->cost;
6796
6797 /* Find cheapest and skip it for the next time. For items
6798 of equal cost, use this order:
6799 src_folded, src, src_eqv, src_related and hash table entry. */
6800 if (src_folded_cost <= src_cost
6801 && src_folded_cost <= src_eqv_cost
6802 && src_folded_cost <= src_related_cost
6803 && src_folded_cost <= src_elt_cost)
6804 {
6805 trial = src_folded, src_folded_cost = 10000;
6806 if (src_folded_force_flag)
6807 trial = force_const_mem (mode, trial);
6808 }
6809 else if (src_cost <= src_eqv_cost
6810 && src_cost <= src_related_cost
6811 && src_cost <= src_elt_cost)
6812 trial = src, src_cost = 10000;
6813 else if (src_eqv_cost <= src_related_cost
6814 && src_eqv_cost <= src_elt_cost)
6815 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6816 else if (src_related_cost <= src_elt_cost)
6817 trial = copy_rtx (src_related), src_related_cost = 10000;
6818 else
6819 {
6820 trial = copy_rtx (elt->exp);
6821 elt = elt->next_same_value;
6822 src_elt_cost = 10000;
6823 }
6824
6825 /* We don't normally have an insn matching (set (pc) (pc)), so
6826 check for this separately here. We will delete such an
6827 insn below.
6828
6829 Tablejump insns contain a USE of the table, so simply replacing
6830 the operand with the constant won't match. This is simply an
6831 unconditional branch, however, and is therefore valid. Just
6832 insert the substitution here and we will delete and re-emit
6833 the insn later. */
6834
6835 if (n_sets == 1 && dest == pc_rtx
6836 && (trial == pc_rtx
6837 || (GET_CODE (trial) == LABEL_REF
6838 && ! condjump_p (insn))))
6839 {
6840 /* If TRIAL is a label in front of a jump table, we are
6841 really falling through the switch (this is how casesi
6842 insns work), so we must branch around the table. */
6843 if (GET_CODE (trial) == CODE_LABEL
6844 && NEXT_INSN (trial) != 0
6845 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6846 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6847 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6848
6849 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6850
6851 SET_SRC (sets[i].rtl) = trial;
6852 cse_jumps_altered = 1;
6853 break;
6854 }
6855
6856 /* Look for a substitution that makes a valid insn. */
6857 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6858 {
6859 /* The result of apply_change_group can be ignored; see
6860 canon_reg. */
6861
6862 validate_change (insn, &SET_SRC (sets[i].rtl),
6863 canon_reg (SET_SRC (sets[i].rtl), insn),
6864 1);
6865 apply_change_group ();
6866 break;
6867 }
6868
6869 /* If we previously found constant pool entries for
6870 constants and this is a constant, try making a
6871 pool entry. Put it in src_folded unless we have already done
6872 this, since that is where it likely came from. */
6873
6874 else if (constant_pool_entries_cost
6875 && CONSTANT_P (trial)
6876 && ! (GET_CODE (trial) == CONST
6877 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
6878 && (src_folded == 0
6879 || (GET_CODE (src_folded) != MEM
6880 && ! src_folded_force_flag))
6881 && GET_MODE_CLASS (mode) != MODE_CC)
6882 {
6883 src_folded_force_flag = 1;
6884 src_folded = trial;
6885 src_folded_cost = constant_pool_entries_cost;
6886 }
6887 }
6888
6889 src = SET_SRC (sets[i].rtl);
6890
6891 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6892 However, there is an important exception: If both are registers
6893 that are not the head of their equivalence class, replace SET_SRC
6894 with the head of the class. If we do not do this, we will have
6895 both registers live over a portion of the basic block. This way,
6896 their lifetimes will likely abut instead of overlapping. */
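      /* Invented instance: if (reg 70) and (reg 71) both belong to an
         equivalence class headed by (reg 65), then for
         (set (reg 70) (reg 70)) we substitute the head, giving
         (set (reg 70) (reg 65)), so the life of (reg 70) can start
         where that of (reg 65) ends instead of overlapping it.  */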
6897 if (GET_CODE (dest) == REG
6898 && REGNO_QTY_VALID_P (REGNO (dest))
6899 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6900 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6901 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6902 /* Don't do this if the original insn had a hard reg as
6903 SET_SRC. */
6904 && (GET_CODE (sets[i].src) != REG
6905 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6906 /* We can't call canon_reg here because it won't do anything if
6907 SRC is a hard register. */
6908 {
6909 int first = qty_first_reg[reg_qty[REGNO (src)]];
6910
6911 src = SET_SRC (sets[i].rtl)
6912 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6913 : gen_rtx (REG, GET_MODE (src), first);
6914
6915 /* If we had a constant that is cheaper than what we are now
6916 setting SRC to, use that constant. We ignored it when we
6917 thought we could make this into a no-op. */
6918 if (src_const && COST (src_const) < COST (src)
6919 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6920 src = src_const;
6921 }
6922
6923 /* If we made a change, recompute SRC values. */
6924 if (src != sets[i].src)
6925 {
6926 do_not_record = 0;
6927 hash_arg_in_memory = 0;
6928 hash_arg_in_struct = 0;
6929 sets[i].src = src;
6930 sets[i].src_hash = HASH (src, mode);
6931 sets[i].src_volatile = do_not_record;
6932 sets[i].src_in_memory = hash_arg_in_memory;
6933 sets[i].src_in_struct = hash_arg_in_struct;
6934 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
6935 }
6936
6937 /* If this is a single SET, we are setting a register, and we have an
6938 equivalent constant, we want to add a REG_NOTE. We don't want
6939 to write a REG_EQUAL note for a constant pseudo since verifying that
6940 that pseudo hasn't been eliminated is a pain. Such a note also
6941 won't help anything. */
6942 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6943 && GET_CODE (src_const) != REG)
6944 {
6945 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6946
6947 /* Record the actual constant value in a REG_EQUAL note, making
6948 a new one if one does not already exist. */
6949 if (tem)
6950 XEXP (tem, 0) = src_const;
6951 else
6952 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
6953 src_const, REG_NOTES (insn));
6954
6955 /* If storing a constant value in a register that
6956 previously held the constant value 0,
6957 record this fact with a REG_WAS_0 note on this insn.
6958
6959 Note that the *register* is required to have previously held 0,
6960 not just any register in the quantity and we must point to the
6961 insn that set that register to zero.
6962
6963 Rather than track each register individually, we just see if
6964 the last set for this quantity was for this register. */
6965
6966 if (REGNO_QTY_VALID_P (REGNO (dest))
6967 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
6968 {
6969 /* See if we previously had a REG_WAS_0 note. */
6970 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
6971 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
6972
6973 if ((tem = single_set (const_insn)) != 0
6974 && rtx_equal_p (SET_DEST (tem), dest))
6975 {
6976 if (note)
6977 XEXP (note, 0) = const_insn;
6978 else
6979 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
6980 const_insn, REG_NOTES (insn));
6981 }
6982 }
6983 }
6984
6985 /* Now deal with the destination. */
6986 do_not_record = 0;
6987 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
6988
6989 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
6990 to the MEM or REG within it. */
6991 while (GET_CODE (dest) == SIGN_EXTRACT
6992 || GET_CODE (dest) == ZERO_EXTRACT
6993 || GET_CODE (dest) == SUBREG
6994 || GET_CODE (dest) == STRICT_LOW_PART)
6995 {
6996 sets[i].inner_dest_loc = &XEXP (dest, 0);
6997 dest = XEXP (dest, 0);
6998 }
6999
7000 sets[i].inner_dest = dest;
7001
7002 if (GET_CODE (dest) == MEM)
7003 {
7004 dest = fold_rtx (dest, insn);
7005
7006 /* Decide whether we invalidate everything in memory,
7007 or just things at non-fixed places.
7008 Writing a large aggregate must invalidate everything
7009 because we don't know how long it is. */
7010 note_mem_written (dest, &writes_memory);
7011 }
7012
7013 /* Compute the hash code of the destination now,
7014 before the effects of this instruction are recorded,
7015 since the register values used in the address computation
7016 are those before this instruction. */
7017 sets[i].dest_hash = HASH (dest, mode);
7018
7019 /* Don't enter a bit-field in the hash table
7020 because the value in it after the store
7021 may not equal what was stored, due to truncation. */
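      /* For instance (field width invented): storing (const_int 5)
         into a 2-bit ZERO_EXTRACT leaves the field holding 1, not 5,
         so recording an equivalence to 5 would be wrong; only
         constants that survive the truncation are recorded below.  */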
7022
7023 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
7024 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
7025 {
7026 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
7027
7028 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
7029 && GET_CODE (width) == CONST_INT
7030 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
7031 && ! (INTVAL (src_const)
7032 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7033 /* Exception: if the value is constant,
7034 and it won't be truncated, record it. */
7035 ;
7036 else
7037 {
7038 /* This is chosen so that the destination will be invalidated
7039 but no new value will be recorded.
7040 We must invalidate because sometimes constant
7041 values can be recorded for bitfields. */
7042 sets[i].src_elt = 0;
7043 sets[i].src_volatile = 1;
7044 src_eqv = 0;
7045 src_eqv_elt = 0;
7046 }
7047 }
7048
7049 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7050 the insn. */
7051 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7052 {
7053 PUT_CODE (insn, NOTE);
7054 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7055 NOTE_SOURCE_FILE (insn) = 0;
7056 cse_jumps_altered = 1;
7057 /* One less use of the label this insn used to jump to. */
7058 --LABEL_NUSES (JUMP_LABEL (insn));
7059 /* No more processing for this set. */
7060 sets[i].rtl = 0;
7061 }
7062
7063 /* If this SET is now setting PC to a label, we know it used to
7064 be a conditional or computed branch. So we see if we can follow
7065 it. If it was a computed branch, delete it and re-emit. */
7066 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7067 {
7068 rtx p;
7069
7070 /* If this is not in the format for a simple branch and
7071 this is the only SET in it, re-emit it. */
7072 if (! simplejump_p (insn) && n_sets == 1)
7073 {
7074 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7075 JUMP_LABEL (new) = XEXP (src, 0);
7076 LABEL_NUSES (XEXP (src, 0))++;
7077 delete_insn (insn);
7078 insn = new;
7079 }
7080 else
7081 /* Otherwise, force rerecognition, since it probably had
7082 a different pattern before.
7083 This shouldn't really be necessary, since whatever
7084 changed the source value above should have done this.
7085 Until the right place is found, might as well do this here. */
7086 INSN_CODE (insn) = -1;
7087
7088 /* Now that we've converted this jump to an unconditional jump,
7089 there is dead code after it. Delete the dead code until we
7090 reach a BARRIER, the end of the function, or a label. Do
7091 not delete NOTEs except for NOTE_INSN_DELETED since later
7092 phases assume these notes are retained. */
7093
7094 p = insn;
7095
7096 while (NEXT_INSN (p) != 0
7097 && GET_CODE (NEXT_INSN (p)) != BARRIER
7098 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7099 {
7100 if (GET_CODE (NEXT_INSN (p)) != NOTE
7101 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7102 delete_insn (NEXT_INSN (p));
7103 else
7104 p = NEXT_INSN (p);
7105 }
7106
7107 /* If we don't have a BARRIER immediately after INSN, put one there.
7108 Much code assumes that there are no NOTEs between a JUMP_INSN and
7109 BARRIER. */
7110
7111 if (NEXT_INSN (insn) == 0
7112 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7113 emit_barrier_before (NEXT_INSN (insn));
7114
7115 /* We might have two BARRIERs separated by notes. Delete the second
7116 one if so. */
7117
7118 if (p != insn && NEXT_INSN (p) != 0
7119 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7120 delete_insn (NEXT_INSN (p));
7121
7122 cse_jumps_altered = 1;
7123 sets[i].rtl = 0;
7124 }
7125
7126 /* If destination is volatile, invalidate it and then do no further
7127 processing for this assignment. */
7128
7129 else if (do_not_record)
7130 {
7131 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7132 || GET_CODE (dest) == MEM)
7133 invalidate (dest, VOIDmode);
7134 else if (GET_CODE (dest) == STRICT_LOW_PART
7135 || GET_CODE (dest) == ZERO_EXTRACT)
7136 invalidate (XEXP (dest, 0), GET_MODE (dest));
7137 sets[i].rtl = 0;
7138 }
7139
7140 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7141 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7142
7143 #ifdef HAVE_cc0
7144 /* If setting CC0, record what it was set to, or a constant, if it
7145 is equivalent to a constant. If it is being set to a floating-point
7146 value, make a COMPARE with the appropriate constant of 0. If we
7147 don't do this, later code can interpret this as a test against
7148 const0_rtx, which can cause problems if we try to put it into an
7149 insn as a floating-point operand. */
7150 if (dest == cc0_rtx)
7151 {
7152 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7153 this_insn_cc0_mode = mode;
7154 if (FLOAT_MODE_P (mode))
7155 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7156 CONST0_RTX (mode));
7157 }
7158 #endif
7159 }
7160
7161 /* Now enter all non-volatile source expressions in the hash table
7162 if they are not already present.
7163 Record their equivalence classes in src_elt.
7164 This way we can insert the corresponding destinations into
7165 the same classes even if the actual sources are no longer in them
7166 (having been invalidated). */
7167
7168 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7169 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7170 {
7171 register struct table_elt *elt;
7172 register struct table_elt *classp = sets[0].src_elt;
7173 rtx dest = SET_DEST (sets[0].rtl);
7174 enum machine_mode eqvmode = GET_MODE (dest);
7175
7176 if (GET_CODE (dest) == STRICT_LOW_PART)
7177 {
7178 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7179 classp = 0;
7180 }
7181 if (insert_regs (src_eqv, classp, 0))
7182 {
7183 rehash_using_reg (src_eqv);
7184 src_eqv_hash = HASH (src_eqv, eqvmode);
7185 }
7186 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7187 elt->in_memory = src_eqv_in_memory;
7188 elt->in_struct = src_eqv_in_struct;
7189 src_eqv_elt = elt;
7190
7191 /* Check to see if src_eqv_elt is the same as a set source which
7192 does not yet have an elt, and if so set the elt of the set source
7193 to src_eqv_elt. */
7194 for (i = 0; i < n_sets; i++)
7195 if (sets[i].rtl && sets[i].src_elt == 0
7196 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7197 sets[i].src_elt = src_eqv_elt;
7198 }
7199
7200 for (i = 0; i < n_sets; i++)
7201 if (sets[i].rtl && ! sets[i].src_volatile
7202 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7203 {
7204 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7205 {
7206 /* REG_EQUAL in setting a STRICT_LOW_PART
7207 gives an equivalent for the entire destination register,
7208 not just for the subreg being stored in now.
7209 This is a more interesting equivalence, so we arrange later
7210 to treat the entire reg as the destination. */
7211 sets[i].src_elt = src_eqv_elt;
7212 sets[i].src_hash = src_eqv_hash;
7213 }
7214 else
7215 {
7216 /* Insert source and constant equivalent into hash table, if not
7217 already present. */
7218 register struct table_elt *classp = src_eqv_elt;
7219 register rtx src = sets[i].src;
7220 register rtx dest = SET_DEST (sets[i].rtl);
7221 enum machine_mode mode
7222 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7223
7224 if (sets[i].src_elt == 0)
7225 {
7226 register struct table_elt *elt;
7227
7228 /* Note that these insert_regs calls cannot remove
7229 any of the src_elt's, because they would have failed to
7230 match if not still valid. */
7231 if (insert_regs (src, classp, 0))
7232 {
7233 rehash_using_reg (src);
7234 sets[i].src_hash = HASH (src, mode);
7235 }
7236 elt = insert (src, classp, sets[i].src_hash, mode);
7237 elt->in_memory = sets[i].src_in_memory;
7238 elt->in_struct = sets[i].src_in_struct;
7239 sets[i].src_elt = classp = elt;
7240 }
7241
7242 if (sets[i].src_const && sets[i].src_const_elt == 0
7243 && src != sets[i].src_const
7244 && ! rtx_equal_p (sets[i].src_const, src))
7245 sets[i].src_elt = insert (sets[i].src_const, classp,
7246 sets[i].src_const_hash, mode);
7247 }
7248 }
7249 else if (sets[i].src_elt == 0)
7250 /* If we did not insert the source into the hash table (e.g., it was
7251 volatile), note the equivalence class for the REG_EQUAL value, if any,
7252 so that the destination goes into that class. */
7253 sets[i].src_elt = src_eqv_elt;
7254
7255 invalidate_from_clobbers (&writes_memory, x);
7256
7257 /* Some registers are invalidated by subroutine calls. Memory is
7258 invalidated by non-constant calls. */
7259
7260 if (GET_CODE (insn) == CALL_INSN)
7261 {
7262 static struct write_data everything = {0, 1, 1, 1};
7263
7264 if (! CONST_CALL_P (insn))
7265 invalidate_memory (&everything);
7266 invalidate_for_call ();
7267 }
7268
7269 /* Now invalidate everything set by this instruction.
7270 If a SUBREG or other funny destination is being set,
7271 sets[i].rtl is still nonzero, so here we invalidate the reg
7272 a part of which is being set. */
7273
7274 for (i = 0; i < n_sets; i++)
7275 if (sets[i].rtl)
7276 {
7277 /* We can't use the inner dest, because the mode associated with
7278 a ZERO_EXTRACT is significant. */
7279 register rtx dest = SET_DEST (sets[i].rtl);
7280
7281 /* Needed for registers to remove the register from its
7282 previous quantity's chain.
7283 Needed for memory if this is a nonvarying address, unless
7284 we have just done an invalidate_memory that covers even those. */
7285 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7286 || (GET_CODE (dest) == MEM && ! writes_memory.all
7287 && ! cse_rtx_addr_varies_p (dest)))
7288 invalidate (dest, VOIDmode);
7289 else if (GET_CODE (dest) == STRICT_LOW_PART
7290 || GET_CODE (dest) == ZERO_EXTRACT)
7291 invalidate (XEXP (dest, 0), GET_MODE (dest));
7292 }
7293
7294 /* Make sure registers mentioned in destinations
7295 are safe for use in an expression to be inserted.
7296 This removes from the hash table
7297 any invalid entry that refers to one of these registers.
7298
7299 We don't care about the return value from mention_regs because
7300 we are going to hash the SET_DEST values unconditionally. */
7301
7302 for (i = 0; i < n_sets; i++)
7303 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7304 mention_regs (SET_DEST (sets[i].rtl));
7305
7306 /* We may have just removed some of the src_elt's from the hash table.
7307 So replace each one with the current head of the same class. */
7308
7309 for (i = 0; i < n_sets; i++)
7310 if (sets[i].rtl)
7311 {
7312 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7313 /* If elt was removed, find current head of same class,
7314 or 0 if nothing remains of that class. */
7315 {
7316 register struct table_elt *elt = sets[i].src_elt;
7317
7318 while (elt && elt->prev_same_value)
7319 elt = elt->prev_same_value;
7320
7321 while (elt && elt->first_same_value == 0)
7322 elt = elt->next_same_value;
7323 sets[i].src_elt = elt ? elt->first_same_value : 0;
7324 }
7325 }
7326
7327 /* Now insert the destinations into their equivalence classes. */
7328
7329 for (i = 0; i < n_sets; i++)
7330 if (sets[i].rtl)
7331 {
7332 register rtx dest = SET_DEST (sets[i].rtl);
7333 register struct table_elt *elt;
7334
7335 /* Don't record value if we are not supposed to risk allocating
7336 floating-point values in registers that might be wider than
7337 memory. */
7338 if ((flag_float_store
7339 && GET_CODE (dest) == MEM
7340 && FLOAT_MODE_P (GET_MODE (dest)))
7341 /* Don't record values of destinations set inside a libcall block
7342 since we might delete the libcall. Things should have been set
7343 up so we won't want to reuse such a value, but we play it safe
7344 here. */
7345 || in_libcall_block
7346 /* If we didn't put a REG_EQUAL value or a source into the hash
7347 	     table, there is no point in recording DEST.  */
7348 || sets[i].src_elt == 0
7349 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7350 or SIGN_EXTEND, don't record DEST since it can cause
7351 some tracking to be wrong.
7352
7353 ??? Think about this more later. */
7354 || (GET_CODE (dest) == SUBREG
7355 && (GET_MODE_SIZE (GET_MODE (dest))
7356 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7357 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7358 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7359 continue;
7360
7361 /* STRICT_LOW_PART isn't part of the value BEING set,
7362 and neither is the SUBREG inside it.
7363 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7364 if (GET_CODE (dest) == STRICT_LOW_PART)
7365 dest = SUBREG_REG (XEXP (dest, 0));
7366
7367 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7368 /* Registers must also be inserted into chains for quantities. */
7369 if (insert_regs (dest, sets[i].src_elt, 1))
7370 {
7371 /* If `insert_regs' changes something, the hash code must be
7372 recalculated. */
7373 rehash_using_reg (dest);
7374 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7375 }
7376
7377 elt = insert (dest, sets[i].src_elt,
7378 sets[i].dest_hash, GET_MODE (dest));
7379 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7380 && ! RTX_UNCHANGING_P (sets[i].inner_dest));
7381
7382 if (elt->in_memory)
7383 {
7384 /* This implicitly assumes a whole struct
7385 need not have MEM_IN_STRUCT_P.
7386 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7387 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7388 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7389 }
7390
7391 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7392 narrower than M2, and both M1 and M2 are the same number of words,
7393 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7394 make that equivalence as well.
7395
7396 However, BAR may have equivalences for which gen_lowpart_if_possible
7397 will produce a simpler value than gen_lowpart_if_possible applied to
7398 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7399 BAR's equivalences. If we don't get a simplified form, make
7400 the SUBREG. It will not be used in an equivalence, but will
7401 cause two similar assignments to be detected.
7402
7403 Note the loop below will find SUBREG_REG (DEST) since we have
7404 already entered SRC and DEST of the SET in the table. */
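      /* As an illustration (modes and register numbers are hypothetical,
	 assuming 32-bit words): given
	     (set (subreg:SI (reg:HI 65) 0) (reg:SI 66))
	 SImode and HImode each occupy one word, so we also record
	     (set (reg:HI 65) (subreg:HI (reg:SI 66) 0))
	 by entering the HImode low part of each of (reg:SI 66)'s
	 equivalences.  */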
7405
7406 if (GET_CODE (dest) == SUBREG
7407 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7408 / UNITS_PER_WORD)
7409 	       == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7410 && (GET_MODE_SIZE (GET_MODE (dest))
7411 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7412 && sets[i].src_elt != 0)
7413 {
7414 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7415 struct table_elt *elt, *classp = 0;
7416
7417 for (elt = sets[i].src_elt->first_same_value; elt;
7418 elt = elt->next_same_value)
7419 {
7420 rtx new_src = 0;
7421 unsigned src_hash;
7422 struct table_elt *src_elt;
7423
7424 /* Ignore invalid entries. */
7425 if (GET_CODE (elt->exp) != REG
7426 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7427 continue;
7428
7429 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7430 if (new_src == 0)
7431 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7432
7433 src_hash = HASH (new_src, new_mode);
7434 src_elt = lookup (new_src, src_hash, new_mode);
7435
7436 	      /* Put the new source in the hash table if it isn't
7437 		 already there.  */
7438 if (src_elt == 0)
7439 {
7440 if (insert_regs (new_src, classp, 0))
7441 {
7442 rehash_using_reg (new_src);
7443 src_hash = HASH (new_src, new_mode);
7444 }
7445 src_elt = insert (new_src, classp, src_hash, new_mode);
7446 src_elt->in_memory = elt->in_memory;
7447 src_elt->in_struct = elt->in_struct;
7448 }
7449 else if (classp && classp != src_elt->first_same_value)
7450 /* Show that two things that we've seen before are
7451 actually the same. */
7452 merge_equiv_classes (src_elt, classp);
7453
7454 classp = src_elt->first_same_value;
7455 }
7456 }
7457 }
7458
7459 /* Special handling for (set REG0 REG1)
7460 where REG0 is the "cheapest", cheaper than REG1.
7461 After cse, REG1 will probably not be used in the sequel,
7462 so (if easily done) change this insn to (set REG1 REG0) and
7463 replace REG1 with REG0 in the previous insn that computed their value.
7464 Then REG1 will become a dead store and won't cloud the situation
7465 for later optimizations.
7466
7467 Do not make this change if REG1 is a hard register, because it will
7468 then be used in the sequel and we may be changing a two-operand insn
7469 into a three-operand insn.
7470
7471 Also do not do this if we are operating on a copy of INSN. */
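  /* As an illustration (register numbers are hypothetical): the pair
	 (set (reg 200) (plus (reg 300) (reg 400)))
	 (set (reg 100) (reg 200))
     where (reg 100) is the cheaper register becomes
	 (set (reg 100) (plus (reg 300) (reg 400)))
	 (set (reg 200) (reg 100))
     and the second insn is then a dead store unless (reg 200) is
     used later.  */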
7472
7473 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7474 && NEXT_INSN (PREV_INSN (insn)) == insn
7475 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7476 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7477 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7478 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7479 == REGNO (SET_DEST (sets[0].rtl))))
7480 {
7481 rtx prev = PREV_INSN (insn);
7482 while (prev && GET_CODE (prev) == NOTE)
7483 prev = PREV_INSN (prev);
7484
7485 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7486 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7487 {
7488 rtx dest = SET_DEST (sets[0].rtl);
7489 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7490
7491 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7492 validate_change (insn, & SET_DEST (sets[0].rtl),
7493 SET_SRC (sets[0].rtl), 1);
7494 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7495 apply_change_group ();
7496
7497 /* If REG1 was equivalent to a constant, REG0 is not. */
7498 if (note)
7499 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7500
7501 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7502 any REG_WAS_0 note on INSN to PREV. */
7503 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7504 if (note)
7505 remove_note (prev, note);
7506
7507 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7508 if (note)
7509 {
7510 remove_note (insn, note);
7511 XEXP (note, 1) = REG_NOTES (prev);
7512 REG_NOTES (prev) = note;
7513 }
7514
7515 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7516 then we must delete it, because the value in REG0 has changed. */
7517 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7518 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7519 remove_note (insn, note);
7520 }
7521 }
7522
7523 /* If this is a conditional jump insn, record any known equivalences due to
7524 the condition being tested. */
7525
7526 last_jump_equiv_class = 0;
7527 if (GET_CODE (insn) == JUMP_INSN
7528 && n_sets == 1 && GET_CODE (x) == SET
7529 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7530 record_jump_equiv (insn, 0);
7531
7532 #ifdef HAVE_cc0
7533 /* If the previous insn set CC0 and this insn no longer references CC0,
7534 delete the previous insn. Here we use the fact that nothing expects CC0
7535 to be valid over an insn, which is true until the final pass. */
7536 if (prev_insn && GET_CODE (prev_insn) == INSN
7537 && (tem = single_set (prev_insn)) != 0
7538 && SET_DEST (tem) == cc0_rtx
7539 && ! reg_mentioned_p (cc0_rtx, x))
7540 {
7541 PUT_CODE (prev_insn, NOTE);
7542 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7543 NOTE_SOURCE_FILE (prev_insn) = 0;
7544 }
7545
7546 prev_insn_cc0 = this_insn_cc0;
7547 prev_insn_cc0_mode = this_insn_cc0_mode;
7548 #endif
7549
7550 prev_insn = insn;
7551 }
7552 \f
7553 /* Store 1 in *WRITES_PTR for those categories of memory ref
7554 that must be invalidated when the expression WRITTEN is stored in.
7555 If WRITTEN is null, say everything must be invalidated. */
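/* As an illustration (the cases follow the code below): a stack push
   through (mem (pre_dec (reg sp))) sets only WRITES_PTR->sp; a BLKmode
   store, whose extent is unknown, invalidates everything; a store
   through a varying scalar address such as (mem:SI (reg 65)) sets
   the var, nonscalar and all fields.  */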
7556
7557 static void
7558 note_mem_written (written, writes_ptr)
7559 rtx written;
7560 struct write_data *writes_ptr;
7561 {
7562 static struct write_data everything = {0, 1, 1, 1};
7563
7564 if (written == 0)
7565 *writes_ptr = everything;
7566 else if (GET_CODE (written) == MEM)
7567 {
7568 /* Pushing or popping the stack invalidates just the stack pointer. */
7569 rtx addr = XEXP (written, 0);
7570 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7571 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7572 && GET_CODE (XEXP (addr, 0)) == REG
7573 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7574 {
7575 writes_ptr->sp = 1;
7576 return;
7577 }
7578 else if (GET_MODE (written) == BLKmode)
7579 *writes_ptr = everything;
7580 else if (cse_rtx_addr_varies_p (written))
7581 {
7582 /* A varying address that is a sum indicates an array element,
7583 and that's just as good as a structure element
7584 in implying that we need not invalidate scalar variables.
7585 However, we must allow QImode aliasing of scalars, because the
7586 ANSI C standard allows character pointers to alias anything.
7587 We must also allow AND addresses, because they may generate
7588 accesses outside the object being referenced. This is used to
7589 generate aligned addresses from unaligned addresses, for instance,
7590 the alpha storeqi_unaligned pattern. */
7591 if (! ((MEM_IN_STRUCT_P (written)
7592 || GET_CODE (XEXP (written, 0)) == PLUS)
7593 && GET_MODE (written) != QImode
7594 && GET_CODE (XEXP (written, 0)) != AND))
7595 writes_ptr->all = 1;
7596 writes_ptr->nonscalar = 1;
7597 }
7598 writes_ptr->var = 1;
7599 }
7600 }
7601
7602 /* Perform invalidation on the basis of everything about an insn
7603 except for invalidating the actual places that are SET in it.
7604 This includes the places CLOBBERed, and anything that might
7605 alias with something that is SET or CLOBBERed.
7606
7607 W points to the writes_memory for this insn, a struct write_data
7608 saying which kinds of memory references must be invalidated.
7609 X is the pattern of the insn. */
7610
7611 static void
7612 invalidate_from_clobbers (w, x)
7613 struct write_data *w;
7614 rtx x;
7615 {
7616 /* If W->var is not set, W specifies no action.
7617 If W->all is set, this step gets all memory refs
7618 so they can be ignored in the rest of this function. */
7619 if (w->var)
7620 invalidate_memory (w);
7621
7622 if (w->sp)
7623 {
7624 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7625 reg_tick[STACK_POINTER_REGNUM]++;
7626
7627 /* This should be *very* rare. */
7628 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7629 invalidate (stack_pointer_rtx, VOIDmode);
7630 }
7631
7632 if (GET_CODE (x) == CLOBBER)
7633 {
7634 rtx ref = XEXP (x, 0);
7635 if (ref)
7636 {
7637 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7638 || (GET_CODE (ref) == MEM && ! w->all))
7639 invalidate (ref, VOIDmode);
7640 else if (GET_CODE (ref) == STRICT_LOW_PART
7641 || GET_CODE (ref) == ZERO_EXTRACT)
7642 invalidate (XEXP (ref, 0), GET_MODE (ref));
7643 }
7644 }
7645 else if (GET_CODE (x) == PARALLEL)
7646 {
7647 register int i;
7648 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7649 {
7650 register rtx y = XVECEXP (x, 0, i);
7651 if (GET_CODE (y) == CLOBBER)
7652 {
7653 rtx ref = XEXP (y, 0);
7654 if (ref)
7655 {
7656 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7657 		      || (GET_CODE (ref) == MEM && ! w->all))
7658 invalidate (ref, VOIDmode);
7659 else if (GET_CODE (ref) == STRICT_LOW_PART
7660 || GET_CODE (ref) == ZERO_EXTRACT)
7661 invalidate (XEXP (ref, 0), GET_MODE (ref));
7662 }
7663 }
7664 }
7665 }
7666 }
7667 \f
7668 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7669 and replace any registers in them with either an equivalent constant
7670 or the canonical form of the register. If we are inside an address,
7671 only do this if the address remains valid.
7672
7673 OBJECT is 0 except when within a MEM in which case it is the MEM.
7674
7675 Return the replacement for X. */
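/* As an illustration (register numbers and the constant are
   hypothetical): if (reg:SI 65) is known to contain (const_int 4),
   a REG_EQUAL note of (plus:SI (reg:SI 65) (reg:SI 66)) is rewritten
   as (plus:SI (const_int 4) (canonical form of (reg:SI 66))).  */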
7676
7677 static rtx
7678 cse_process_notes (x, object)
7679 rtx x;
7680 rtx object;
7681 {
7682 enum rtx_code code = GET_CODE (x);
7683 char *fmt = GET_RTX_FORMAT (code);
7684 int i;
7685
7686 switch (code)
7687 {
7688 case CONST_INT:
7689 case CONST:
7690 case SYMBOL_REF:
7691 case LABEL_REF:
7692 case CONST_DOUBLE:
7693 case PC:
7694 case CC0:
7695 case LO_SUM:
7696 return x;
7697
7698 case MEM:
7699 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7700 return x;
7701
7702 case EXPR_LIST:
7703 case INSN_LIST:
7704 if (REG_NOTE_KIND (x) == REG_EQUAL)
7705 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7706 if (XEXP (x, 1))
7707 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7708 return x;
7709
7710 case SIGN_EXTEND:
7711 case ZERO_EXTEND:
7712 case SUBREG:
7713 {
7714 rtx new = cse_process_notes (XEXP (x, 0), object);
7715 /* We don't substitute VOIDmode constants into these rtx,
7716 since they would impede folding. */
7717 if (GET_MODE (new) != VOIDmode)
7718 validate_change (object, &XEXP (x, 0), new, 0);
7719 return x;
7720 }
7721
7722 case REG:
7723 i = reg_qty[REGNO (x)];
7724
7725 /* Return a constant or a constant register. */
7726 if (REGNO_QTY_VALID_P (REGNO (x))
7727 && qty_const[i] != 0
7728 && (CONSTANT_P (qty_const[i])
7729 || GET_CODE (qty_const[i]) == REG))
7730 {
7731 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7732 if (new)
7733 return new;
7734 }
7735
7736 /* Otherwise, canonicalize this register. */
7737 return canon_reg (x, NULL_RTX);
7738 }
7739
7740 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7741 if (fmt[i] == 'e')
7742 validate_change (object, &XEXP (x, i),
7743 cse_process_notes (XEXP (x, i), object), 0);
7744
7745 return x;
7746 }
7747 \f
7748 /* Find common subexpressions between the end test of a loop and the beginning
7749 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7750
7751 Often we have a loop where an expression in the exit test is used
7752 in the body of the loop. For example "while (*p) *q++ = *p++;".
7753 Because of the way we duplicate the loop exit test in front of the loop,
7754 however, we don't detect that common subexpression. This will be caught
7755 when global cse is implemented, but this is a quite common case.
7756
7757 This function handles the most common cases of these common expressions.
7758 It is called after we have processed the basic block ending with the
7759 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7760 jumps to a label used only once. */
7761
7762 static void
7763 cse_around_loop (loop_start)
7764 rtx loop_start;
7765 {
7766 rtx insn;
7767 int i;
7768 struct table_elt *p;
7769
7770 /* If the jump at the end of the loop doesn't go to the start, we don't
7771 do anything. */
7772 for (insn = PREV_INSN (loop_start);
7773 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7774 insn = PREV_INSN (insn))
7775 ;
7776
7777 if (insn == 0
7778 || GET_CODE (insn) != NOTE
7779 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7780 return;
7781
7782 /* If the last insn of the loop (the end test) was an NE comparison,
7783 we will interpret it as an EQ comparison, since we fell through
7784 the loop. Any equivalences resulting from that comparison are
7785 therefore not valid and must be invalidated. */
7786 if (last_jump_equiv_class)
7787 for (p = last_jump_equiv_class->first_same_value; p;
7788 p = p->next_same_value)
7789 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7790 || (GET_CODE (p->exp) == SUBREG
7791 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7792 invalidate (p->exp, VOIDmode);
7793 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7794 || GET_CODE (p->exp) == ZERO_EXTRACT)
7795 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7796
7797 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7798 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7799
7800 The only thing we do with SET_DEST is invalidate entries, so we
7801 can safely process each SET in order. It is slightly less efficient
7802 to do so, but we only want to handle the most common cases. */
7803
7804 for (insn = NEXT_INSN (loop_start);
7805 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7806 && ! (GET_CODE (insn) == NOTE
7807 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7808 insn = NEXT_INSN (insn))
7809 {
7810 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7811 && (GET_CODE (PATTERN (insn)) == SET
7812 || GET_CODE (PATTERN (insn)) == CLOBBER))
7813 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7814 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7815 && GET_CODE (PATTERN (insn)) == PARALLEL)
7816 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7817 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7818 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7819 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7820 loop_start);
7821 }
7822 }
7823 \f
7824 /* Variable used for communications between the next two routines. */
7825
7826 static struct write_data skipped_writes_memory;
7827
7828 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7829 since they are done elsewhere. This function is called via note_stores. */
7830
7831 static void
7832 invalidate_skipped_set (dest, set)
7833      rtx dest;
7834      rtx set;
7835 {
7836 if (GET_CODE (dest) == MEM)
7837 note_mem_written (dest, &skipped_writes_memory);
7838
7839 /* There are times when an address can appear varying and be a PLUS
7840 during this scan when it would be a fixed address were we to know
7841 the proper equivalences. So promote "nonscalar" to be "all". */
7842 if (skipped_writes_memory.nonscalar)
7843 skipped_writes_memory.all = 1;
7844
7845 if (GET_CODE (set) == CLOBBER
7846 #ifdef HAVE_cc0
7847 || dest == cc0_rtx
7848 #endif
7849 || dest == pc_rtx)
7850 return;
7851
7852 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7853 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7854 invalidate (dest, VOIDmode);
7855 else if (GET_CODE (dest) == STRICT_LOW_PART
7856 || GET_CODE (dest) == ZERO_EXTRACT)
7857 invalidate (XEXP (dest, 0), GET_MODE (dest));
7858 }
7859
7860 /* Invalidate all insns from START up to the end of the function or the
7861    next label.  This is called when we wish to CSE around a block that is
7862 conditionally executed. */
7863
7864 static void
7865 invalidate_skipped_block (start)
7866 rtx start;
7867 {
7868 rtx insn;
7869 static struct write_data init = {0, 0, 0, 0};
7870 static struct write_data everything = {0, 1, 1, 1};
7871
7872 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7873 insn = NEXT_INSN (insn))
7874 {
7875 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7876 continue;
7877
7878 skipped_writes_memory = init;
7879
7880 if (GET_CODE (insn) == CALL_INSN)
7881 {
7882 invalidate_for_call ();
7883 skipped_writes_memory = everything;
7884 }
7885
7886 note_stores (PATTERN (insn), invalidate_skipped_set);
7887 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7888 }
7889 }
7890 \f
7891 /* Used for communication between the following two routines; contains a
7892 value to be checked for modification. */
7893
7894 static rtx cse_check_loop_start_value;
7895
7896 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7897 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
7898
7899 static void
7900 cse_check_loop_start (x, set)
7901 rtx x;
7902 rtx set;
7903 {
7904 if (cse_check_loop_start_value == 0
7905 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
7906 return;
7907
7908 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7909 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7910 cse_check_loop_start_value = 0;
7911 }
7912
7913 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7914 a loop that starts with the label at LOOP_START.
7915
7916 If X is a SET, we see if its SET_SRC is currently in our hash table.
7917 If so, we see if it has a value equal to some register used only in the
7918 loop exit code (as marked by jump.c).
7919
7920 If those two conditions are true, we search backwards from the start of
7921 the loop to see if that same value was loaded into a register that still
7922 retains its value at the start of the loop.
7923
7924 If so, we insert an insn after the load to copy the destination of that
7925 load into the equivalent register and (try to) replace our SET_SRC with that
7926 register.
7927
7928 In any event, we invalidate whatever this SET or CLOBBER modifies. */
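/* Sketch of the transformation (insns and register numbers are
   hypothetical): if, before the loop,

	P: (set (reg 70) (mem X))

   and inside the loop

	INSN: (set (reg 80) (mem X))

   where (reg 90) is a cheaper register used only in the exit test and
   known equal to (mem X), we emit (set (reg 90) (reg 70)) after P and
   rewrite INSN as (set (reg 80) (reg 90)).  */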
7929
7930 static void
7931 cse_set_around_loop (x, insn, loop_start)
7932 rtx x;
7933 rtx insn;
7934 rtx loop_start;
7935 {
7936 struct table_elt *src_elt;
7937 static struct write_data init = {0, 0, 0, 0};
7938 struct write_data writes_memory;
7939
7940 writes_memory = init;
7941
7942 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7943 are setting PC or CC0 or whose SET_SRC is already a register. */
7944 if (GET_CODE (x) == SET
7945 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
7946 && GET_CODE (SET_SRC (x)) != REG)
7947 {
7948 src_elt = lookup (SET_SRC (x),
7949 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
7950 GET_MODE (SET_DEST (x)));
7951
7952 if (src_elt)
7953 for (src_elt = src_elt->first_same_value; src_elt;
7954 src_elt = src_elt->next_same_value)
7955 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
7956 && COST (src_elt->exp) < COST (SET_SRC (x)))
7957 {
7958 rtx p, set;
7959
7960 /* Look for an insn in front of LOOP_START that sets
7961 something in the desired mode to SET_SRC (x) before we hit
7962 a label or CALL_INSN. */
7963
7964 for (p = prev_nonnote_insn (loop_start);
7965 p && GET_CODE (p) != CALL_INSN
7966 && GET_CODE (p) != CODE_LABEL;
7967 p = prev_nonnote_insn (p))
7968 if ((set = single_set (p)) != 0
7969 && GET_CODE (SET_DEST (set)) == REG
7970 && GET_MODE (SET_DEST (set)) == src_elt->mode
7971 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
7972 {
7973 /* We now have to ensure that nothing between P
7974 and LOOP_START modified anything referenced in
7975 SET_SRC (x). We know that nothing within the loop
7976 can modify it, or we would have invalidated it in
7977 the hash table. */
7978 rtx q;
7979
7980 cse_check_loop_start_value = SET_SRC (x);
7981 for (q = p; q != loop_start; q = NEXT_INSN (q))
7982 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
7983 note_stores (PATTERN (q), cse_check_loop_start);
7984
7985 /* If nothing was changed and we can replace our
7986 SET_SRC, add an insn after P to copy its destination
7987 to what we will be replacing SET_SRC with. */
7988 if (cse_check_loop_start_value
7989 && validate_change (insn, &SET_SRC (x),
7990 src_elt->exp, 0))
7991 emit_insn_after (gen_move_insn (src_elt->exp,
7992 SET_DEST (set)),
7993 p);
7994 break;
7995 }
7996 }
7997 }
7998
7999 /* Now invalidate anything modified by X. */
8000 note_mem_written (SET_DEST (x), &writes_memory);
8001
8002 if (writes_memory.var)
8003 invalidate_memory (&writes_memory);
8004
8005 /* See comment on similar code in cse_insn for explanation of these
8006 tests. */
8007 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
8008 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
8009 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
8010 invalidate (SET_DEST (x), VOIDmode);
8011 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
8012 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
8013 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
8014 }
8015 \f
8016 /* Find the end of INSN's basic block and return its range,
8017 the total number of SETs in all the insns of the block, the last insn of the
8018 block, and the branch path.
8019
8020 The branch path indicates which branches should be followed. If a non-zero
8021 path size is specified, the block should be rescanned and a different set
8022 of branches will be taken. The branch path is only used if
8023 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
8024
8025 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
8026 used to describe the block. It is filled in with the information about
8027 the current block. The incoming structure's branch path, if any, is used
8028 to construct the output branch path. */
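/* As an illustration (branch names are hypothetical): a first scan
   might record the path {B1: TAKEN, B2: TAKEN}.  Before a rescan the
   last TAKEN entry is flipped, giving {B1: TAKEN, B2: NOT_TAKEN};
   once every entry is NOT_TAKEN, the path size becomes zero and no
   further rescan is done.  An AROUND entry marks a branch around a
   block whose stores are invalidated rather than scanned.  */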
8029
8030 void
8031 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
8032 rtx insn;
8033 struct cse_basic_block_data *data;
8034 int follow_jumps;
8035 int after_loop;
8036 int skip_blocks;
8037 {
8038 rtx p = insn, q;
8039 int nsets = 0;
8040 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8041 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8042 int path_size = data->path_size;
8043 int path_entry = 0;
8044 int i;
8045
8046 /* Update the previous branch path, if any. If the last branch was
8047 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8048 shorten the path by one and look at the previous branch. We know that
8049 at least one branch must have been taken if PATH_SIZE is non-zero. */
8050 while (path_size > 0)
8051 {
8052 if (data->path[path_size - 1].status != NOT_TAKEN)
8053 {
8054 data->path[path_size - 1].status = NOT_TAKEN;
8055 break;
8056 }
8057 else
8058 path_size--;
8059 }
8060
8061 /* Scan to end of this basic block. */
8062 while (p && GET_CODE (p) != CODE_LABEL)
8063 {
8064 /* Don't cse out the end of a loop. This makes a difference
8065 only for the unusual loops that always execute at least once;
8066 all other loops have labels there so we will stop in any case.
8067 Cse'ing out the end of the loop is dangerous because it
8068 might cause an invariant expression inside the loop
8069 to be reused after the end of the loop. This would make it
8070 hard to move the expression out of the loop in loop.c,
8071 especially if it is one of several equivalent expressions
8072 and loop.c would like to eliminate it.
8073
8074 If we are running after loop.c has finished, we can ignore
8075 the NOTE_INSN_LOOP_END. */
8076
8077 if (! after_loop && GET_CODE (p) == NOTE
8078 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8079 break;
8080
8081 	 /* Don't cse over a call to setjmp; on some machines (e.g., the VAX)
8082 the regs restored by the longjmp come from
8083 a later time than the setjmp. */
8084 if (GET_CODE (p) == NOTE
8085 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8086 break;
8087
8088 /* A PARALLEL can have lots of SETs in it,
8089 especially if it is really an ASM_OPERANDS. */
8090 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8091 && GET_CODE (PATTERN (p)) == PARALLEL)
8092 nsets += XVECLEN (PATTERN (p), 0);
8093 else if (GET_CODE (p) != NOTE)
8094 nsets += 1;
8095
8096 /* Ignore insns made by CSE; they cannot affect the boundaries of
8097 the basic block. */
8098
8099 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8100 high_cuid = INSN_CUID (p);
8101 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8102 low_cuid = INSN_CUID (p);
8103
8104 /* See if this insn is in our branch path. If it is and we are to
8105 take it, do so. */
8106 if (path_entry < path_size && data->path[path_entry].branch == p)
8107 {
8108 if (data->path[path_entry].status != NOT_TAKEN)
8109 p = JUMP_LABEL (p);
8110
8111 /* Point to next entry in path, if any. */
8112 path_entry++;
8113 }
8114
8115 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8116 was specified, we haven't reached our maximum path length, there are
8117 insns following the target of the jump, this is the only use of the
8118 jump label, and the target label is preceded by a BARRIER.
8119
8120 Alternatively, we can follow the jump if it branches around a
8121 block of code and there are no other branches into the block.
8122 In this case invalidate_skipped_block will be called to invalidate any
8123 registers set in the block when following the jump. */
8124
8125 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8126 && GET_CODE (p) == JUMP_INSN
8127 && GET_CODE (PATTERN (p)) == SET
8128 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8129 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8130 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8131 {
8132 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8133 if ((GET_CODE (q) != NOTE
8134 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8135 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8136 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8137 break;
8138
8139 /* If we ran into a BARRIER, this code is an extension of the
8140 basic block when the branch is taken. */
8141 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8142 {
8143 	      /* Don't allow ourselves to keep walking around an
8144 always-executed loop. */
8145 if (next_real_insn (q) == next)
8146 {
8147 p = NEXT_INSN (p);
8148 continue;
8149 }
8150
8151 /* Similarly, don't put a branch in our path more than once. */
8152 for (i = 0; i < path_entry; i++)
8153 if (data->path[i].branch == p)
8154 break;
8155
8156 if (i != path_entry)
8157 break;
8158
8159 data->path[path_entry].branch = p;
8160 data->path[path_entry++].status = TAKEN;
8161
8162 /* This branch now ends our path. It was possible that we
8163 didn't see this branch the last time around (when the
8164 insn in front of the target was a JUMP_INSN that was
8165 turned into a no-op). */
8166 path_size = path_entry;
8167
8168 p = JUMP_LABEL (p);
8169 /* Mark block so we won't scan it again later. */
8170 PUT_MODE (NEXT_INSN (p), QImode);
8171 }
8172 /* Detect a branch around a block of code. */
8173 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8174 {
8175 register rtx tmp;
8176
8177 if (next_real_insn (q) == next)
8178 {
8179 p = NEXT_INSN (p);
8180 continue;
8181 }
8182
8183 for (i = 0; i < path_entry; i++)
8184 if (data->path[i].branch == p)
8185 break;
8186
8187 if (i != path_entry)
8188 break;
8189
8190 /* This is no_labels_between_p (p, q) with an added check for
8191 reaching the end of a function (in case Q precedes P). */
8192 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8193 if (GET_CODE (tmp) == CODE_LABEL)
8194 break;
8195
8196 if (tmp == q)
8197 {
8198 data->path[path_entry].branch = p;
8199 data->path[path_entry++].status = AROUND;
8200
8201 path_size = path_entry;
8202
8203 p = JUMP_LABEL (p);
8204 /* Mark block so we won't scan it again later. */
8205 PUT_MODE (NEXT_INSN (p), QImode);
8206 }
8207 }
8208 }
8209 p = NEXT_INSN (p);
8210 }
8211
8212 data->low_cuid = low_cuid;
8213 data->high_cuid = high_cuid;
8214 data->nsets = nsets;
8215 data->last = p;
8216
8217   /* If no jump in the path was taken, set our path length to zero
8218      so a rescan won't be done.  */
8219 for (i = path_size - 1; i >= 0; i--)
8220 if (data->path[i].status != NOT_TAKEN)
8221 break;
8222
8223 if (i == -1)
8224 data->path_size = 0;
8225 else
8226 data->path_size = path_size;
8227
8228 /* End the current branch path. */
8229 data->path[path_size].branch = 0;
8230 }
8231 \f
8232 /* Perform cse on the instructions of a function.
8233 F is the first instruction.
8234    NREGS is one plus the highest pseudo-reg number used in the function.
8235
8236 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8237 (only if -frerun-cse-after-loop).
8238
8239 Returns 1 if jump_optimize should be redone due to simplifications
8240 in conditional jump instructions. */
8241
8242 int
8243 cse_main (f, nregs, after_loop, file)
8244 rtx f;
8245 int nregs;
8246 int after_loop;
8247 FILE *file;
8248 {
8249 struct cse_basic_block_data val;
8250 register rtx insn = f;
8251 register int i;
8252
8253 cse_jumps_altered = 0;
8254 recorded_label_ref = 0;
8255 constant_pool_entries_cost = 0;
8256 val.path_size = 0;
8257
8258 init_recog ();
8259
8260 max_reg = nregs;
8261
8262 all_minus_one = (int *) alloca (nregs * sizeof (int));
8263 consec_ints = (int *) alloca (nregs * sizeof (int));
8264
8265 for (i = 0; i < nregs; i++)
8266 {
8267 all_minus_one[i] = -1;
8268 consec_ints[i] = i;
8269 }
8270
8271 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8272 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8273 reg_qty = (int *) alloca (nregs * sizeof (int));
8274 reg_in_table = (int *) alloca (nregs * sizeof (int));
8275 reg_tick = (int *) alloca (nregs * sizeof (int));
8276
8277 #ifdef LOAD_EXTEND_OP
8278
8279 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8280 and change the code and mode as appropriate. */
8281 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
8282 #endif
8283
8284 /* Discard all the free elements of the previous function
8285      since they are allocated in the temporary obstack.  */
8286 bzero ((char *) table, sizeof table);
8287 free_element_chain = 0;
8288 n_elements_made = 0;
8289
8290 /* Find the largest uid. */
8291
8292 max_uid = get_max_uid ();
8293 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8294 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8295
8296 /* Compute the mapping from uids to cuids.
8297 CUIDs are numbers assigned to insns, like uids,
8298 except that cuids increase monotonically through the code.
8299 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8300 between two insns is not affected by -g. */
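  /* As an illustration: for the sequence insn, line-number note, insn,
     the cuids assigned are 1, 1, 2, so the cuid distance between the
     two insns is the same with and without -g.  */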
8301
8302 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8303 {
8304 if (GET_CODE (insn) != NOTE
8305 || NOTE_LINE_NUMBER (insn) < 0)
8306 INSN_CUID (insn) = ++i;
8307 else
8308 /* Give a line number note the same cuid as preceding insn. */
8309 INSN_CUID (insn) = i;
8310 }
8311
8312 /* Initialize which registers are clobbered by calls. */
8313
8314 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8315
8316 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8317 if ((call_used_regs[i]
8318 /* Used to check !fixed_regs[i] here, but that isn't safe;
8319 fixed regs are still call-clobbered, and sched can get
8320 confused if they can "live across calls".
8321
8322 The frame pointer is always preserved across calls. The arg
8323 pointer is if it is fixed. The stack pointer usually is, unless
8324 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8325 will be present. If we are generating PIC code, the PIC offset
8326 table register is preserved across calls. */
8327
8328 && i != STACK_POINTER_REGNUM
8329 && i != FRAME_POINTER_REGNUM
8330 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8331 && i != HARD_FRAME_POINTER_REGNUM
8332 #endif
8333 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8334 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8335 #endif
8336 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8337 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8338 #endif
8339 )
8340 || global_regs[i])
8341 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8342
8343 /* Loop over basic blocks.
8344 Compute the maximum number of qty's needed for each basic block
8345 (which is 2 for each SET). */
8346 insn = f;
8347 while (insn)
8348 {
8349 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8350 flag_cse_skip_blocks);
8351
8352 /* If this basic block was already processed or has no sets, skip it. */
8353 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8354 {
8355 PUT_MODE (insn, VOIDmode);
8356 insn = (val.last ? NEXT_INSN (val.last) : 0);
8357 val.path_size = 0;
8358 continue;
8359 }
8360
8361 cse_basic_block_start = val.low_cuid;
8362 cse_basic_block_end = val.high_cuid;
8363 max_qty = val.nsets * 2;
8364
8365 if (file)
8366 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8367 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8368 val.nsets);
8369
8370 /* Make MAX_QTY bigger to give us room to optimize
8371 past the end of this basic block, if that should prove useful. */
8372 if (max_qty < 500)
8373 max_qty = 500;
8374
8375 max_qty += max_reg;
8376
8377 /* If this basic block is being extended by following certain jumps,
8378 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8379 Otherwise, we start after this basic block. */
8380 if (val.path_size > 0)
8381 cse_basic_block (insn, val.last, val.path, 0);
8382 else
8383 {
8384 int old_cse_jumps_altered = cse_jumps_altered;
8385 rtx temp;
8386
8387 /* When cse changes a conditional jump to an unconditional
8388 jump, we want to reprocess the block, since it will give
8389 us a new branch path to investigate. */
8390 cse_jumps_altered = 0;
8391 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8392 if (cse_jumps_altered == 0
8393 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8394 insn = temp;
8395
8396 cse_jumps_altered |= old_cse_jumps_altered;
8397 }
8398
8399 #ifdef USE_C_ALLOCA
8400 alloca (0);
8401 #endif
8402 }
8403
8404 /* Tell refers_to_mem_p that qty_const info is not available. */
8405 qty_const = 0;
8406
8407 if (max_elements_made < n_elements_made)
8408 max_elements_made = n_elements_made;
8409
8410 return cse_jumps_altered || recorded_label_ref;
8411 }
8412
8413 /* Process a single basic block.  FROM and TO are the limits of the basic
8414 block. NEXT_BRANCH points to the branch path when following jumps or
8415 a null path when not following jumps.
8416
8417 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8418 loop. This is true when we are being called for the last time on a
8419 block and this CSE pass is before loop.c. */
8420
8421 static rtx
8422 cse_basic_block (from, to, next_branch, around_loop)
8423 register rtx from, to;
8424 struct branch_path *next_branch;
8425 int around_loop;
8426 {
8427 register rtx insn;
8428 int to_usage = 0;
8429 int in_libcall_block = 0;
8430
8431 /* Each of these arrays is undefined before max_reg, so only allocate
8432 the space actually needed and adjust the start below. */
8433
8434 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8435 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8436   qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8437 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8438 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8439 qty_comparison_code
8440 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8441 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8442 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8443
8444 qty_first_reg -= max_reg;
8445 qty_last_reg -= max_reg;
8446 qty_mode -= max_reg;
8447 qty_const -= max_reg;
8448 qty_const_insn -= max_reg;
8449 qty_comparison_code -= max_reg;
8450 qty_comparison_qty -= max_reg;
8451 qty_comparison_const -= max_reg;
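  /* After these adjustments, indexing with a quantity number works
     directly: with max_reg == 100 (hypothetical), qty_first_reg[100]
     refers to element 0 of the block allocated above, while indices
     0 .. max_reg - 1, which are never used, have no storage behind
     them.  */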
8452
8453 new_basic_block ();
8454
8455 /* TO might be a label. If so, protect it from being deleted. */
8456 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8457 ++LABEL_NUSES (to);
8458
8459 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8460 {
8461 register enum rtx_code code;
8462
8463 /* See if this is a branch that is part of the path. If so, and it is
8464 to be taken, do so. */
8465 if (next_branch->branch == insn)
8466 {
8467 enum taken status = next_branch++->status;
8468 if (status != NOT_TAKEN)
8469 {
8470 if (status == TAKEN)
8471 record_jump_equiv (insn, 1);
8472 else
8473 invalidate_skipped_block (NEXT_INSN (insn));
8474
8475 /* Set the last insn as the jump insn; it doesn't affect cc0.
8476 Then follow this branch. */
8477 #ifdef HAVE_cc0
8478 prev_insn_cc0 = 0;
8479 #endif
8480 prev_insn = insn;
8481 insn = JUMP_LABEL (insn);
8482 continue;
8483 }
8484 }
8485
8486 code = GET_CODE (insn);
8487 if (GET_MODE (insn) == QImode)
8488 PUT_MODE (insn, VOIDmode);
8489
8490 if (GET_RTX_CLASS (code) == 'i')
8491 {
8492 /* Process notes first so we have all notes in canonical forms when
8493 looking for duplicate operations. */
8494
8495 if (REG_NOTES (insn))
8496 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8497
8498 	  /* Track when we are inside a LIBCALL block.  Inside such a block,
8499 we do not want to record destinations. The last insn of a
8500 LIBCALL block is not considered to be part of the block, since
8501 its destination is the result of the block and hence should be
8502 recorded. */
8503
8504 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8505 in_libcall_block = 1;
8506 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8507 in_libcall_block = 0;
8508
8509 cse_insn (insn, in_libcall_block);
8510 }
8511
8512 /* If INSN is now an unconditional jump, skip to the end of our
8513 basic block by pretending that we just did the last insn in the
8514 basic block. If we are jumping to the end of our block, show
8515 that we can have one usage of TO. */
8516
8517 if (simplejump_p (insn))
8518 {
8519 if (to == 0)
8520 return 0;
8521
8522 if (JUMP_LABEL (insn) == to)
8523 to_usage = 1;
8524
8525 /* Maybe TO was deleted because the jump is unconditional.
8526 If so, there is nothing left in this basic block. */
8527 /* ??? Perhaps it would be smarter to set TO
8528 to whatever follows this insn,
8529 and pretend the basic block had always ended here. */
8530 if (INSN_DELETED_P (to))
8531 break;
8532
8533 insn = PREV_INSN (to);
8534 }
8535
8536 /* See if it is ok to keep on going past the label
8537 which used to end our basic block. Remember that we incremented
8538 the count of that label, so we decrement it here. If we made
8539 a jump unconditional, TO_USAGE will be one; in that case, we don't
8540 want to count the use in that jump. */
8541
8542 if (to != 0 && NEXT_INSN (insn) == to
8543 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8544 {
8545 struct cse_basic_block_data val;
8546 rtx prev;
8547
8548 insn = NEXT_INSN (to);
8549
8550 if (LABEL_NUSES (to) == 0)
8551 insn = delete_insn (to);
8552
8553 /* If TO was the last insn in the function, we are done. */
8554 if (insn == 0)
8555 return 0;
8556
8557 /* If TO was preceded by a BARRIER we are done with this block
8558 because it has no continuation. */
8559 prev = prev_nonnote_insn (to);
8560 if (prev && GET_CODE (prev) == BARRIER)
8561 return insn;
8562
8563 /* Find the end of the following block. Note that we won't be
8564 following branches in this case. */
8565 to_usage = 0;
8566 val.path_size = 0;
8567 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8568
8569 /* If the tables we allocated have enough space left
8570 to handle all the SETs in the next basic block,
8571 continue through it. Otherwise, return,
8572 and that block will be scanned individually. */
8573 if (val.nsets * 2 + next_qty > max_qty)
8574 break;
8575
8576 cse_basic_block_start = val.low_cuid;
8577 cse_basic_block_end = val.high_cuid;
8578 to = val.last;
8579
8580 /* Prevent TO from being deleted if it is a label. */
8581 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8582 ++LABEL_NUSES (to);
8583
8584 /* Back up so we process the first insn in the extension. */
8585 insn = PREV_INSN (insn);
8586 }
8587 }
8588
8589 if (next_qty > max_qty)
8590 abort ();
8591
8592 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8593 the previous insn is the only insn that branches to the head of a loop,
8594 we can cse into the loop. Don't do this if we changed the jump
8595 structure of a loop unless we aren't going to be following jumps. */
8596
8597 if ((cse_jumps_altered == 0
8598 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8599 && around_loop && to != 0
8600 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8601 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8602 && JUMP_LABEL (PREV_INSN (to)) != 0
8603 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8604 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8605
8606 return to ? NEXT_INSN (to) : 0;
8607 }
8608 \f
8609 /* Count the number of times registers are used (not set) in X.
8610 COUNTS is an array in which we accumulate the count, INCR is how much
8611 we count each register usage.
8612
8613 Don't count a usage of DEST, which is the SET_DEST of a SET which
8614 contains X in its SET_SRC. This is because such a SET does not
8615 modify the liveness of DEST. */
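/* As an illustration: in (set (reg 65) (plus (reg 65) (const_int 1)))
   the use of (reg 65) inside the source is not counted, since deleting
   the insn would not change the liveness of (reg 65); an otherwise
   unused counter built from such increments can therefore be deleted
   by delete_dead_from_cse below.  */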
8616
8617 static void
8618 count_reg_usage (x, counts, dest, incr)
8619 rtx x;
8620 int *counts;
8621 rtx dest;
8622 int incr;
8623 {
8624 enum rtx_code code;
8625 char *fmt;
8626 int i, j;
8627
8628 if (x == 0)
8629 return;
8630
8631 switch (code = GET_CODE (x))
8632 {
8633 case REG:
8634 if (x != dest)
8635 counts[REGNO (x)] += incr;
8636 return;
8637
8638 case PC:
8639 case CC0:
8640 case CONST:
8641 case CONST_INT:
8642 case CONST_DOUBLE:
8643 case SYMBOL_REF:
8644 case LABEL_REF:
8645 case CLOBBER:
8646 return;
8647
8648 case SET:
8649 /* Unless we are setting a REG, count everything in SET_DEST. */
8650 if (GET_CODE (SET_DEST (x)) != REG)
8651 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8652
8653 /* If SRC has side-effects, then we can't delete this insn, so the
8654 usage of SET_DEST inside SRC counts.
8655
8656 ??? Strictly-speaking, we might be preserving this insn
8657 because some other SET has side-effects, but that's hard
8658 to do and can't happen now. */
8659 count_reg_usage (SET_SRC (x), counts,
8660 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
8661 incr);
8662 return;
8663
8664 case CALL_INSN:
8665 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
8666
8667 /* ... falls through ... */
8668 case INSN:
8669 case JUMP_INSN:
8670 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
8671
8672 /* Things used in a REG_EQUAL note aren't dead since loop may try to
8673 use them. */
8674
8675 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
8676 return;
8677
8678 case EXPR_LIST:
8679 case INSN_LIST:
8680 if (REG_NOTE_KIND (x) == REG_EQUAL
8681 	  || GET_CODE (XEXP (x, 0)) == USE)
8682 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
8683 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
8684 return;
8685 }
8686
8687 fmt = GET_RTX_FORMAT (code);
8688 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8689 {
8690 if (fmt[i] == 'e')
8691 count_reg_usage (XEXP (x, i), counts, dest, incr);
8692 else if (fmt[i] == 'E')
8693 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8694 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
8695 }
8696 }
8697 \f
8698 /* Scan all the insns and delete any that are dead; i.e., they store a register
8699 that is never used or they copy a register to itself.
8700
8701 This is used to remove insns made obviously dead by cse. It improves the
8702 heuristics in loop since it won't try to move dead invariants out of loops
8703 or make givs for dead quantities. The remaining passes of the compilation
8704 are also sped up. */
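/* As an illustration (register numbers are hypothetical): if (reg 65)
   is used only by (set (reg 66) (reg 65)) and (reg 66) is never used,
   the backward scan first deletes the copy, which drops the usage
   count of (reg 65) to zero, and then deletes the insn that computed
   (reg 65) as well.  */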
8705
8706 void
8707 delete_dead_from_cse (insns, nreg)
8708 rtx insns;
8709 int nreg;
8710 {
8711 int *counts = (int *) alloca (nreg * sizeof (int));
8712 rtx insn, prev;
8713 rtx tem;
8714 int i;
8715 int in_libcall = 0;
8716
8717 /* First count the number of times each register is used. */
8718 bzero ((char *) counts, sizeof (int) * nreg);
8719 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8720 count_reg_usage (insn, counts, NULL_RTX, 1);
8721
8722 /* Go from the last insn to the first and delete insns that only set unused
8723 registers or copy a register to itself. As we delete an insn, remove
8724 usage counts for registers it uses. */
8725 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8726 {
8727 int live_insn = 0;
8728
8729 prev = prev_real_insn (insn);
8730
8731 /* Don't delete any insns that are part of a libcall block.
8732 Flow or loop might get confused if we did that. Remember
8733 that we are scanning backwards. */
8734 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8735 in_libcall = 1;
8736
8737 if (in_libcall)
8738 live_insn = 1;
8739 else if (GET_CODE (PATTERN (insn)) == SET)
8740 {
8741 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8742 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8743 ;
8744
8745 #ifdef HAVE_cc0
8746 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8747 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8748 && ((tem = next_nonnote_insn (insn)) == 0
8749 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8750 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8751 ;
8752 #endif
8753 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8754 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8755 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8756 || side_effects_p (SET_SRC (PATTERN (insn))))
8757 live_insn = 1;
8758 }
8759 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8760 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8761 {
8762 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8763
8764 if (GET_CODE (elt) == SET)
8765 {
8766 if (GET_CODE (SET_DEST (elt)) == REG
8767 && SET_DEST (elt) == SET_SRC (elt))
8768 ;
8769
8770 #ifdef HAVE_cc0
8771 else if (GET_CODE (SET_DEST (elt)) == CC0
8772 && ! side_effects_p (SET_SRC (elt))
8773 && ((tem = next_nonnote_insn (insn)) == 0
8774 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8775 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8776 ;
8777 #endif
8778 else if (GET_CODE (SET_DEST (elt)) != REG
8779 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8780 || counts[REGNO (SET_DEST (elt))] != 0
8781 || side_effects_p (SET_SRC (elt)))
8782 live_insn = 1;
8783 }
8784 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8785 live_insn = 1;
8786 }
8787 else
8788 live_insn = 1;
8789
8790 /* If this is a dead insn, delete it and show registers in it aren't
8791 being used. */
8792
8793 if (! live_insn)
8794 {
8795 count_reg_usage (insn, counts, NULL_RTX, -1);
8796 delete_insn (insn);
8797 }
8798
8799 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8800 in_libcall = 0;
8801 }
8802 }