gcc/loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
3 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* This is the loop optimization pass of the compiler.
24 It finds invariant computations within loops and moves them
25 to the beginning of the loop. Then it identifies basic and
26 general induction variables.
27
 28    Basic induction variables (BIVs) are pseudo registers that are set within
 29    a loop only by incrementing or decrementing their value.  General induction
 30    variables (GIVs) are pseudo registers whose value is a linear function
 31    of a basic induction variable.  BIVs are recognized by `basic_induction_var';
 32    GIVs by `general_induction_var'.
33
34 Once induction variables are identified, strength reduction is applied to the
35 general induction variables, and induction variable elimination is applied to
36 the basic induction variables.
37
38 It also finds cases where
39 a register is set within the loop by zero-extending a narrower value
40 and changes these to zero the entire register once before the loop
41 and merely copy the low part within the loop.
42
 43    Most of the complexity is in heuristics to decide when it is
 44    worthwhile to do these things.  */
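
/* As a rough illustration only (a sketch, not drawn from any particular
   test case), a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   is treated approximately as if it had been written

	t = x * y;                        (invariant product hoisted)
	p = &a[0];                        (strength-reduced giv standing for &a[i])
	for (i = 0; i < n; i++, p++)
	  *p = t;

   Here `i' is a biv, because the loop changes it only by adding a constant,
   and the address &a[i] is a giv, because it is a linear function of `i';
   strength reduction replaces the multiply implied by &a[i] with the cheap
   increment of `p'.  */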
45
46 #include "config.h"
47 #include "system.h"
48 #include "coretypes.h"
49 #include "tm.h"
50 #include "rtl.h"
51 #include "tm_p.h"
52 #include "function.h"
53 #include "expr.h"
54 #include "hard-reg-set.h"
55 #include "basic-block.h"
56 #include "insn-config.h"
57 #include "regs.h"
58 #include "recog.h"
59 #include "flags.h"
60 #include "real.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
68 #include "ggc.h"
69 #include "timevar.h"
70 #include "tree-pass.h"
71
72 /* Get the loop info pointer of a loop. */
73 #define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)
74
75 /* Get a pointer to the loop movables structure. */
76 #define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)
77
78 /* Get a pointer to the loop registers structure. */
79 #define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)
80
81 /* Get a pointer to the loop induction variables structure. */
82 #define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)
83
84 /* Get the luid of an insn. Catch the error of trying to reference the LUID
85 of an insn added during loop, since these don't have LUIDs. */
86
87 #define INSN_LUID(INSN) \
88 (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])
89
90 #define REGNO_FIRST_LUID(REGNO) \
91 (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
92 ? uid_luid[REGNO_FIRST_UID (REGNO)] \
93 : 0)
94 #define REGNO_LAST_LUID(REGNO) \
95 (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
96 ? uid_luid[REGNO_LAST_UID (REGNO)] \
97 : INT_MAX)
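
/* If the first or last use of a register is in an insn added during the
   loop pass itself (its uid is >= max_uid_for_loop, so it has no luid),
   the two macros above fall back to 0 and INT_MAX respectively; that is,
   the register is conservatively treated as live over the largest
   possible range.  */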
98
99 /* A "basic induction variable" or biv is a pseudo reg that is set
100 (within this loop) only by incrementing or decrementing it. */
101 /* A "general induction variable" or giv is a pseudo reg whose
102 value is a linear function of a biv. */
103
104 /* Bivs are recognized by `basic_induction_var';
105 Givs by `general_induction_var'. */
106
107 /* An enum for the two different types of givs, those that are used
108 as memory addresses and those that are calculated into registers. */
109 enum g_types
110 {
111 DEST_ADDR,
112 DEST_REG
113 };
114
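/* For illustration only (the variable names here are invented), in a loop
   body containing

	i = i + 1;		   i is a biv: set only by adding a constant
	j = 4 * i + 8;		   j is a DEST_REG giv, computed into a register
	x = *(p + 4 * i);	   the address p + 4 * i is a DEST_ADDR giv,
				   used only as a memory address

   both givs are linear functions of the biv i: the first with multiplier 4
   and constant addend 8, the second with multiplier 4 and the
   loop-invariant addend p.  */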
115
116 /* A `struct induction' is created for every instruction that sets
117 an induction variable (either a biv or a giv). */
118
119 struct induction
120 {
121 rtx insn; /* The insn that sets a biv or giv */
122 rtx new_reg; /* New register, containing strength reduced
123 version of this giv. */
124 rtx src_reg; /* Biv from which this giv is computed.
125 (If this is a biv, then this is the biv.) */
126 enum g_types giv_type; /* Indicate whether DEST_ADDR or DEST_REG */
127 rtx dest_reg; /* Destination register for insn: this is the
128 register which was the biv or giv.
129 For a biv, this equals src_reg.
130 For a DEST_ADDR type giv, this is 0. */
131 rtx *location; /* Place in the insn where this giv occurs.
132 If GIV_TYPE is DEST_REG, this is 0. */
133 /* For a biv, this is the place where add_val
134 was found. */
135 enum machine_mode mode; /* The mode of this biv or giv */
136 rtx mem; /* For DEST_ADDR, the memory object. */
137 rtx mult_val; /* Multiplicative factor for src_reg. */
138 rtx add_val; /* Additive constant for that product. */
139 int benefit; /* Gain from eliminating this insn. */
140 rtx final_value; /* If the giv is used outside the loop, and its
141 final value could be calculated, it is put
142 here, and the giv is made replaceable. Set
143 the giv to this value before the loop. */
144 unsigned combined_with; /* The number of givs this giv has been
145 combined with. If nonzero, this giv
146 cannot combine with any other giv. */
147 unsigned replaceable : 1; /* 1 if we can substitute the strength-reduced
148 variable for the original variable.
149 0 means they must be kept separate and the
150 new one must be copied into the old pseudo
151 reg each time the old one is set. */
152 unsigned not_replaceable : 1; /* Used to prevent duplicating work. This is
153 1 if we know that the giv definitely can
154 not be made replaceable, in which case we
155 don't bother checking the variable again
156 even if further info is available.
157 Both this and the above can be zero. */
158 unsigned ignore : 1; /* 1 prohibits further processing of giv */
159 unsigned always_computable : 1;/* 1 if this value is computable every
160 iteration. */
161 unsigned always_executed : 1; /* 1 if this set occurs each iteration. */
162 unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv
163 update may be done multiple times per
164 iteration. */
165 unsigned cant_derive : 1; /* For giv's, 1 if this giv cannot derive
166 another giv. This occurs in many cases
167 where a giv's lifetime spans an update to
168 a biv. */
169 unsigned maybe_dead : 1; /* 1 if this giv might be dead. In that case,
170 we won't use it to eliminate a biv, it
171 would probably lose. */
172 unsigned auto_inc_opt : 1; /* 1 if this giv had its increment output next
173 to it to try to form an auto-inc address. */
174 unsigned shared : 1;
175 unsigned no_const_addval : 1; /* 1 if add_val does not contain a const. */
176 int lifetime; /* Length of life of this giv */
177 rtx derive_adjustment; /* If nonzero, is an adjustment to be
178 subtracted from add_val when this giv
179 derives another. This occurs when the
180 giv spans a biv update by incrementation. */
181 rtx ext_dependent; /* If nonzero, is a sign or zero extension
 182 				   of a biv on which this giv is dependent.  */
183 struct induction *next_iv; /* For givs, links together all givs that are
184 based on the same biv. For bivs, links
185 together all biv entries that refer to the
186 same biv register. */
187 struct induction *same; /* For givs, if the giv has been combined with
188 another giv, this points to the base giv.
189 The base giv will have COMBINED_WITH nonzero.
190 For bivs, if the biv has the same LOCATION
 191 				   as another biv, this points to the base
192 biv. */
193 struct induction *same_insn; /* If there are multiple identical givs in
194 the same insn, then all but one have this
195 field set, and they all point to the giv
196 that doesn't have this field set. */
197 rtx last_use; /* For a giv made from a biv increment, this is
198 a substitute for the lifetime information. */
199 };
200
201
202 /* A `struct iv_class' is created for each biv. */
203
204 struct iv_class
205 {
206 unsigned int regno; /* Pseudo reg which is the biv. */
207 int biv_count; /* Number of insns setting this reg. */
208 struct induction *biv; /* List of all insns that set this reg. */
209 int giv_count; /* Number of DEST_REG givs computed from this
210 biv. The resulting count is only used in
211 check_dbra_loop. */
212 struct induction *giv; /* List of all insns that compute a giv
213 from this reg. */
214 int total_benefit; /* Sum of BENEFITs of all those givs. */
215 rtx initial_value; /* Value of reg at loop start. */
216 rtx initial_test; /* Test performed on BIV before loop. */
217 rtx final_value; /* Value of reg at loop end, if known. */
218 struct iv_class *next; /* Links all class structures together. */
219 rtx init_insn; /* insn which initializes biv, 0 if none. */
220 rtx init_set; /* SET of INIT_INSN, if any. */
221 unsigned incremented : 1; /* 1 if somewhere incremented/decremented */
222 unsigned eliminable : 1; /* 1 if plausible candidate for
223 elimination. */
224 unsigned nonneg : 1; /* 1 if we added a REG_NONNEG note for
225 this. */
226 unsigned reversed : 1; /* 1 if we reversed the loop that this
227 biv controls. */
228 unsigned all_reduced : 1; /* 1 if all givs using this biv have
229 been reduced. */
230 };
231
232
233 /* Definitions used by the basic induction variable discovery code. */
234 enum iv_mode
235 {
236 UNKNOWN_INDUCT,
237 BASIC_INDUCT,
238 NOT_BASIC_INDUCT,
239 GENERAL_INDUCT
240 };
241
242
243 /* A `struct iv' is created for every register. */
244
245 struct iv
246 {
247 enum iv_mode type;
248 union
249 {
250 struct iv_class *class;
251 struct induction *info;
252 } iv;
253 };
254
255
256 #define REG_IV_TYPE(ivs, n) ivs->regs[n].type
257 #define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
258 #define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class
259
260
261 struct loop_ivs
262 {
263 /* Indexed by register number, contains pointer to `struct
264 iv' if register is an induction variable. */
265 struct iv *regs;
266
267 /* Size of regs array. */
268 unsigned int n_regs;
269
270 /* The head of a list which links together (via the next field)
271 every iv class for the current loop. */
272 struct iv_class *list;
273 };
274
275
276 typedef struct loop_mem_info
277 {
278 rtx mem; /* The MEM itself. */
279 rtx reg; /* Corresponding pseudo, if any. */
280 int optimize; /* Nonzero if we can optimize access to this MEM. */
281 } loop_mem_info;
282
283
284
285 struct loop_reg
286 {
287 /* Number of times the reg is set during the loop being scanned.
288 During code motion, a negative value indicates a reg that has
 289      been made a candidate; in particular -2 means that it is a
290 candidate that we know is equal to a constant and -1 means that
291 it is a candidate not known equal to a constant. After code
292 motion, regs moved have 0 (which is accurate now) while the
293 failed candidates have the original number of times set.
294
295 Therefore, at all times, == 0 indicates an invariant register;
296 < 0 a conditionally invariant one. */
297 int set_in_loop;
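  /* For example (purely illustrative): a register set twice in the loop
     starts with set_in_loop == 2; if both sets become candidates known
     equal to a constant it is changed to -2 (-1 if not known constant);
     once the insns are actually moved it becomes 0, and if the move is
     abandoned it is restored to 2.  */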
298
299 /* Original value of set_in_loop; same except that this value
300 is not set negative for a reg whose sets have been made candidates
301 and not set to 0 for a reg that is moved. */
302 int n_times_set;
303
304 /* Contains the insn in which a register was used if it was used
305 exactly once; contains const0_rtx if it was used more than once. */
306 rtx single_usage;
307
308 /* Nonzero indicates that the register cannot be moved or strength
309 reduced. */
310 char may_not_optimize;
311
312 /* Nonzero means reg N has already been moved out of one loop.
313 This reduces the desire to move it out of another. */
314 char moved_once;
315 };
316
317
318 struct loop_regs
319 {
320 int num; /* Number of regs used in table. */
321 int size; /* Size of table. */
 322   struct loop_reg *array;	/* Register usage info array.  */
323 int multiple_uses; /* Nonzero if a reg has multiple uses. */
324 };
325
326
327
328 struct loop_movables
329 {
330 /* Head of movable chain. */
331 struct movable *head;
332 /* Last movable in chain. */
333 struct movable *last;
334 };
335
336
337 /* Information pertaining to a loop. */
338
339 struct loop_info
340 {
341 /* Nonzero if there is a subroutine call in the current loop. */
342 int has_call;
343 /* Nonzero if there is a libcall in the current loop. */
344 int has_libcall;
 345   /* Nonzero if there is a non-constant call in the current loop.  */
346 int has_nonconst_call;
347 /* Nonzero if there is a prefetch instruction in the current loop. */
348 int has_prefetch;
349 /* Nonzero if there is a volatile memory reference in the current
350 loop. */
351 int has_volatile;
352 /* Nonzero if there is a tablejump in the current loop. */
353 int has_tablejump;
354 /* Nonzero if there are ways to leave the loop other than falling
355 off the end. */
356 int has_multiple_exit_targets;
357 /* Nonzero if there is an indirect jump in the current function. */
358 int has_indirect_jump;
359 /* Register or constant initial loop value. */
360 rtx initial_value;
361 /* Register or constant value used for comparison test. */
362 rtx comparison_value;
363 /* Register or constant approximate final value. */
364 rtx final_value;
365 /* Register or constant initial loop value with term common to
366 final_value removed. */
367 rtx initial_equiv_value;
368 /* Register or constant final loop value with term common to
369 initial_value removed. */
370 rtx final_equiv_value;
371 /* Register corresponding to iteration variable. */
372 rtx iteration_var;
373 /* Constant loop increment. */
374 rtx increment;
375 enum rtx_code comparison_code;
376 /* Holds the number of loop iterations. It is zero if the number
377 could not be calculated. Must be unsigned since the number of
378 iterations can be as high as 2^wordsize - 1. For loops with a
379 wider iterator, this number will be zero if the number of loop
380 iterations is too large for an unsigned integer to hold. */
381 unsigned HOST_WIDE_INT n_iterations;
382 int used_count_register;
383 /* The loop iterator induction variable. */
384 struct iv_class *iv;
385 /* List of MEMs that are stored in this loop. */
386 rtx store_mems;
387 /* Array of MEMs that are used (read or written) in this loop, but
388 cannot be aliased by anything in this loop, except perhaps
389 themselves. In other words, if mems[i] is altered during
390 the loop, it is altered by an expression that is rtx_equal_p to
391 it. */
392 loop_mem_info *mems;
393 /* The index of the next available slot in MEMS. */
394 int mems_idx;
395 /* The number of elements allocated in MEMS. */
396 int mems_allocated;
397 /* Nonzero if we don't know what MEMs were changed in the current
398 loop. This happens if the loop contains a call (in which case
399 `has_call' will also be set) or if we store into more than
400 NUM_STORES MEMs. */
401 int unknown_address_altered;
402 /* The above doesn't count any readonly memory locations that are
403 stored. This does. */
404 int unknown_constant_address_altered;
405 /* Count of memory write instructions discovered in the loop. */
406 int num_mem_sets;
407 /* The insn where the first of these was found. */
408 rtx first_loop_store_insn;
409 /* The chain of movable insns in loop. */
410 struct loop_movables movables;
 411   /* The registers used in the loop.  */
 412   struct loop_regs regs;
 413   /* The induction variable information for the loop.  */
414 struct loop_ivs ivs;
415 /* Nonzero if call is in pre_header extended basic block. */
416 int pre_header_has_call;
417 };
418
419 /* Not really meaningful values, but at least something. */
420 #ifndef SIMULTANEOUS_PREFETCHES
421 #define SIMULTANEOUS_PREFETCHES 3
422 #endif
423 #ifndef PREFETCH_BLOCK
424 #define PREFETCH_BLOCK 32
425 #endif
426 #ifndef HAVE_prefetch
427 #define HAVE_prefetch 0
428 #define CODE_FOR_prefetch 0
429 #define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
430 #endif
431
432 /* Give up the prefetch optimizations once we exceed a given threshold.
433 It is unlikely that we would be able to optimize something in a loop
434 with so many detected prefetches. */
435 #define MAX_PREFETCHES 100
436 /* The number of prefetch blocks that are beneficial to fetch at once before
437 a loop with a known (and low) iteration count. */
438 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
439 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
440 since it is likely that the data are already in the cache. */
441 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
442
443 /* Parameterize some prefetch heuristics so they can be turned on and off
444 easily for performance testing on new architectures. These can be
445 defined in target-dependent files. */
446
447 /* Prefetch is worthwhile only when loads/stores are dense. */
448 #ifndef PREFETCH_ONLY_DENSE_MEM
449 #define PREFETCH_ONLY_DENSE_MEM 1
450 #endif
451
 452 /* Define what we mean by "dense" loads and stores; this value divided by 256
 453    is the minimum fraction of memory references worth prefetching (the default of 220 is about 86%).  */
454 #ifndef PREFETCH_DENSE_MEM
455 #define PREFETCH_DENSE_MEM 220
456 #endif
457
458 /* Do not prefetch for a loop whose iteration count is known to be low. */
459 #ifndef PREFETCH_NO_LOW_LOOPCNT
460 #define PREFETCH_NO_LOW_LOOPCNT 1
461 #endif
462
463 /* Define what we mean by a "low" iteration count. */
464 #ifndef PREFETCH_LOW_LOOPCNT
465 #define PREFETCH_LOW_LOOPCNT 32
466 #endif
467
468 /* Do not prefetch for a loop that contains a function call; such a loop is
469 probably not an internal loop. */
470 #ifndef PREFETCH_NO_CALL
471 #define PREFETCH_NO_CALL 1
472 #endif
473
474 /* Do not prefetch accesses with an extreme stride. */
475 #ifndef PREFETCH_NO_EXTREME_STRIDE
476 #define PREFETCH_NO_EXTREME_STRIDE 1
477 #endif
478
479 /* Define what we mean by an "extreme" stride. */
480 #ifndef PREFETCH_EXTREME_STRIDE
481 #define PREFETCH_EXTREME_STRIDE 4096
482 #endif
483
484 /* Define a limit to how far apart indices can be and still be merged
485 into a single prefetch. */
486 #ifndef PREFETCH_EXTREME_DIFFERENCE
487 #define PREFETCH_EXTREME_DIFFERENCE 4096
488 #endif
489
490 /* Issue prefetch instructions before the loop to fetch data to be used
491 in the first few loop iterations. */
492 #ifndef PREFETCH_BEFORE_LOOP
493 #define PREFETCH_BEFORE_LOOP 1
494 #endif
495
496 /* Do not handle reversed order prefetches (negative stride). */
497 #ifndef PREFETCH_NO_REVERSE_ORDER
498 #define PREFETCH_NO_REVERSE_ORDER 1
499 #endif
500
501 /* Prefetch even if the GIV is in conditional code. */
502 #ifndef PREFETCH_CONDITIONAL
503 #define PREFETCH_CONDITIONAL 1
504 #endif
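
/* Purely as a hypothetical example of the override mechanism used above
   (the values are invented, not taken from any real target), a port could
   provide its own settings in its target-dependent files:

	#define SIMULTANEOUS_PREFETCHES 8
	#define PREFETCH_BLOCK 64
	#define PREFETCH_CONDITIONAL 0

   Any macro the target leaves undefined keeps the default given above.  */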
505
506 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
507 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
508
509 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
510 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
511 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
512
513 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
514 ((REGNO) < FIRST_PSEUDO_REGISTER \
515 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
516
517
518 /* Vector mapping INSN_UIDs to luids.
 519    The luids are like uids but always increase monotonically.
520 We use them to see whether a jump comes from outside a given loop. */
521
522 static int *uid_luid;
523
524 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
525 number the insn is contained in. */
526
527 static struct loop **uid_loop;
528
529 /* 1 + largest uid of any insn. */
530
531 static int max_uid_for_loop;
532
533 /* Number of loops detected in current function. Used as index to the
534 next few tables. */
535
536 static int max_loop_num;
537
538 /* Bound on pseudo register number before loop optimization.
539 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
540 static unsigned int max_reg_before_loop;
541
542 /* The value to pass to the next call of reg_scan_update. */
543 static int loop_max_reg;
544 \f
545 /* During the analysis of a loop, a chain of `struct movable's
546 is made to record all the movable insns found.
547 Then the entire chain can be scanned to decide which to move. */
548
549 struct movable
550 {
551 rtx insn; /* A movable insn */
552 rtx set_src; /* The expression this reg is set from. */
553 rtx set_dest; /* The destination of this SET. */
554 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
555 of any registers used within the LIBCALL. */
556 int consec; /* Number of consecutive following insns
557 that must be moved with this one. */
558 unsigned int regno; /* The register it sets */
559 short lifetime; /* lifetime of that register;
560 may be adjusted when matching movables
561 that load the same value are found. */
562 short savings; /* Number of insns we can move for this reg,
563 including other movables that force this
564 or match this one. */
565 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
566 a low part that we should avoid changing when
567 clearing the rest of the reg. */
568 unsigned int cond : 1; /* 1 if only conditionally movable */
569 unsigned int force : 1; /* 1 means MUST move this insn */
570 unsigned int global : 1; /* 1 means reg is live outside this loop */
571 /* If PARTIAL is 1, GLOBAL means something different:
572 that the reg is live outside the range from where it is set
573 to the following label. */
574 unsigned int done : 1; /* 1 inhibits further processing of this */
575
576 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
577 In particular, moving it does not make it
578 invariant. */
579 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
580 load SRC, rather than copying INSN. */
581 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
582 first insn of a consecutive sets group. */
583 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
584 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
585 the original insn with a copy from that
586 pseudo, rather than deleting it. */
587 struct movable *match; /* First entry for same value */
588 struct movable *forces; /* An insn that must be moved if this is */
589 struct movable *next;
590 };
591
592
593 static FILE *loop_dump_stream;
594
595 /* Forward declarations. */
596
597 static void invalidate_loops_containing_label (rtx);
598 static void find_and_verify_loops (rtx, struct loops *);
599 static void mark_loop_jump (rtx, struct loop *);
600 static void prescan_loop (struct loop *);
601 static int reg_in_basic_block_p (rtx, rtx);
602 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
603 static int labels_in_range_p (rtx, int);
604 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
605 static void note_addr_stored (rtx, rtx, void *);
606 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
607 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
608 static rtx find_regs_nested (rtx, rtx);
609 static void scan_loop (struct loop*, int);
610 #if 0
611 static void replace_call_address (rtx, rtx, rtx);
612 #endif
613 static rtx skip_consec_insns (rtx, int);
614 static int libcall_benefit (rtx);
615 static rtx libcall_other_reg (rtx, rtx);
616 static void record_excess_regs (rtx, rtx, rtx *);
617 static void ignore_some_movables (struct loop_movables *);
618 static void force_movables (struct loop_movables *);
619 static void combine_movables (struct loop_movables *, struct loop_regs *);
620 static int num_unmoved_movables (const struct loop *);
621 static int regs_match_p (rtx, rtx, struct loop_movables *);
622 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
623 struct loop_regs *);
624 static void add_label_notes (rtx, rtx);
625 static void move_movables (struct loop *loop, struct loop_movables *, int,
626 int);
627 static void loop_movables_add (struct loop_movables *, struct movable *);
628 static void loop_movables_free (struct loop_movables *);
629 static int count_nonfixed_reads (const struct loop *, rtx);
630 static void loop_bivs_find (struct loop *);
631 static void loop_bivs_init_find (struct loop *);
632 static void loop_bivs_check (struct loop *);
633 static void loop_givs_find (struct loop *);
634 static void loop_givs_check (struct loop *);
635 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
636 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
637 struct induction *, rtx);
638 static void loop_givs_dead_check (struct loop *, struct iv_class *);
639 static void loop_givs_reduce (struct loop *, struct iv_class *);
640 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
641 static void loop_ivs_free (struct loop *);
642 static void strength_reduce (struct loop *, int);
643 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
644 static int valid_initial_value_p (rtx, rtx, int, rtx);
645 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
646 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
647 rtx, rtx *, int, int);
648 static void check_final_value (const struct loop *, struct induction *);
649 static void loop_ivs_dump (const struct loop *, FILE *, int);
650 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
651 static void loop_biv_dump (const struct induction *, FILE *, int);
652 static void loop_giv_dump (const struct induction *, FILE *, int);
653 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
654 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
655 rtx *);
656 static void update_giv_derive (const struct loop *, rtx);
657 static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
658 static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
659 HOST_WIDE_INT, enum machine_mode,
660 unsigned HOST_WIDE_INT);
661 static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
662 HOST_WIDE_INT, enum machine_mode, bool);
663 static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
664 HOST_WIDE_INT, rtx);
665 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
666 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
667 rtx, rtx, rtx *, rtx *, rtx **);
668 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
669 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
670 rtx *, rtx *, int, int *, enum machine_mode);
671 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
672 rtx *, rtx *, rtx *);
673 static int check_dbra_loop (struct loop *, int);
674 static rtx express_from_1 (rtx, rtx, rtx);
675 static rtx combine_givs_p (struct induction *, struct induction *);
676 static int cmp_combine_givs_stats (const void *, const void *);
677 static void combine_givs (struct loop_regs *, struct iv_class *);
678 static int product_cheap_p (rtx, rtx);
679 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
680 int, int);
681 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
682 struct iv_class *, int, basic_block, rtx);
683 static int last_use_this_basic_block (rtx, rtx);
684 static void record_initial (rtx, rtx, void *);
685 static void update_reg_last_use (rtx, rtx);
686 static rtx next_insn_in_loop (const struct loop *, rtx);
687 static void loop_regs_scan (const struct loop *, int);
688 static int count_insns_in_loop (const struct loop *);
689 static int find_mem_in_note_1 (rtx *, void *);
690 static rtx find_mem_in_note (rtx);
691 static void load_mems (const struct loop *);
692 static int insert_loop_mem (rtx *, void *);
693 static int replace_loop_mem (rtx *, void *);
694 static void replace_loop_mems (rtx, rtx, rtx, int);
695 static int replace_loop_reg (rtx *, void *);
696 static void replace_loop_regs (rtx insn, rtx, rtx);
697 static void note_reg_stored (rtx, rtx, void *);
698 static void try_copy_prop (const struct loop *, rtx, unsigned int);
699 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
700 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
701 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
702 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
703 static void loop_regs_update (const struct loop *, rtx);
704 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
705 static int loop_invariant_p (const struct loop *, rtx);
706 static rtx loop_insn_hoist (const struct loop *, rtx);
707 static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
708 rtx, basic_block, rtx);
709 static rtx loop_insn_emit_before (const struct loop *, basic_block,
710 rtx, rtx);
711 static int loop_insn_first_p (rtx, rtx);
712 static rtx get_condition_for_loop (const struct loop *, rtx);
713 static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
714 static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
715 static rtx extend_value_for_giv (struct induction *, rtx);
716 static rtx loop_insn_sink (const struct loop *, rtx);
717
718 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
719 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
720 rtx, rtx);
721 static rtx loop_call_insn_hoist (const struct loop *, rtx);
722 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
723
724 static void loop_dump_aux (const struct loop *, FILE *, int);
725 static void loop_delete_insns (rtx, rtx);
726 static HOST_WIDE_INT remove_constant_addition (rtx *);
727 static rtx gen_load_of_final_value (rtx, rtx);
728 void debug_ivs (const struct loop *);
729 void debug_iv_class (const struct iv_class *);
730 void debug_biv (const struct induction *);
731 void debug_giv (const struct induction *);
732 void debug_loop (const struct loop *);
733 void debug_loops (const struct loops *);
734
735 typedef struct loop_replace_args
736 {
737 rtx match;
738 rtx replacement;
739 rtx insn;
740 } loop_replace_args;
741
742 /* Nonzero iff INSN is between START and END, inclusive. */
743 #define INSN_IN_RANGE_P(INSN, START, END) \
744 (INSN_UID (INSN) < max_uid_for_loop \
745 && INSN_LUID (INSN) >= INSN_LUID (START) \
746 && INSN_LUID (INSN) <= INSN_LUID (END))
747
748 /* Indirect_jump_in_function is computed once per function. */
749 static int indirect_jump_in_function;
750 static int indirect_jump_in_function_p (rtx);
751
752 static int compute_luids (rtx, rtx, int);
753
754 static int biv_elimination_giv_has_0_offset (struct induction *,
755 struct induction *, rtx);
756 \f
 757 /* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
 758    copy the value of the strength-reduced giv to its original register.  */
759 static int copy_cost;
760
761 /* Cost of using a register, to normalize the benefits of a giv. */
762 static int reg_address_cost;
763
764 void
765 init_loop (void)
766 {
767 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
768
769 reg_address_cost = address_cost (reg, SImode);
770
771 copy_cost = COSTS_N_INSNS (1);
772 }
773 \f
774 /* Compute the mapping from uids to luids.
775 LUIDs are numbers assigned to insns, like uids,
776 except that luids increase monotonically through the code.
777 Start at insn START and stop just before END. Assign LUIDs
778 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
779 static int
780 compute_luids (rtx start, rtx end, int prev_luid)
781 {
782 int i;
783 rtx insn;
784
785 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
786 {
787 if (INSN_UID (insn) >= max_uid_for_loop)
788 continue;
789 /* Don't assign luids to line-number NOTEs, so that the distance in
790 luids between two insns is not affected by -g. */
791 if (!NOTE_P (insn)
792 || NOTE_LINE_NUMBER (insn) <= 0)
793 uid_luid[INSN_UID (insn)] = ++i;
794 else
795 /* Give a line number note the same luid as preceding insn. */
796 uid_luid[INSN_UID (insn)] = i;
797 }
798 return i + 1;
799 }
800 \f
801 /* Entry point of this file. Perform loop optimization
802 on the current function. F is the first insn of the function
803 and DUMPFILE is a stream for output of a trace of actions taken
804 (or 0 if none should be output). */
805
806 void
807 loop_optimize (rtx f, FILE *dumpfile, int flags)
808 {
809 rtx insn;
810 int i;
811 struct loops loops_data;
812 struct loops *loops = &loops_data;
813 struct loop_info *loops_info;
814
815 loop_dump_stream = dumpfile;
816
817 init_recog_no_volatile ();
818
819 max_reg_before_loop = max_reg_num ();
820 loop_max_reg = max_reg_before_loop;
821
822 regs_may_share = 0;
823
824 /* Count the number of loops. */
825
826 max_loop_num = 0;
827 for (insn = f; insn; insn = NEXT_INSN (insn))
828 {
829 if (NOTE_P (insn)
830 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
831 max_loop_num++;
832 }
833
834 /* Don't waste time if no loops. */
835 if (max_loop_num == 0)
836 return;
837
838 loops->num = max_loop_num;
839
840 /* Get size to use for tables indexed by uids.
841 Leave some space for labels allocated by find_and_verify_loops. */
842 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
843
844 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
845 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
846
847 /* Allocate storage for array of loops. */
848 loops->array = xcalloc (loops->num, sizeof (struct loop));
849
850 /* Find and process each loop.
851 First, find them, and record them in order of their beginnings. */
852 find_and_verify_loops (f, loops);
853
854 /* Allocate and initialize auxiliary loop information. */
855 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
856 for (i = 0; i < (int) loops->num; i++)
857 loops->array[i].aux = loops_info + i;
858
859 /* Now find all register lifetimes. This must be done after
860 find_and_verify_loops, because it might reorder the insns in the
861 function. */
862 reg_scan (f, max_reg_before_loop);
863
864 /* This must occur after reg_scan so that registers created by gcse
865 will have entries in the register tables.
866
867 We could have added a call to reg_scan after gcse_main in toplev.c,
868 but moving this call to init_alias_analysis is more efficient. */
869 init_alias_analysis ();
870
871 /* See if we went too far. Note that get_max_uid already returns
 872      one more than the maximum uid of any insn.  */
873 gcc_assert (get_max_uid () <= max_uid_for_loop);
874 /* Now reset it to the actual size we need. See above. */
875 max_uid_for_loop = get_max_uid ();
876
877 /* find_and_verify_loops has already called compute_luids, but it
878 might have rearranged code afterwards, so we need to recompute
879 the luids now. */
880 compute_luids (f, NULL_RTX, 0);
881
882 /* Don't leave gaps in uid_luid for insns that have been
883 deleted. It is possible that the first or last insn
884 using some register has been deleted by cross-jumping.
885 Make sure that uid_luid for that former insn's uid
886 points to the general area where that insn used to be. */
887 for (i = 0; i < max_uid_for_loop; i++)
888 {
889 uid_luid[0] = uid_luid[i];
890 if (uid_luid[0] != 0)
891 break;
892 }
893 for (i = 0; i < max_uid_for_loop; i++)
894 if (uid_luid[i] == 0)
895 uid_luid[i] = uid_luid[i - 1];
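
  /* As a small made-up illustration of the two loops above: if uid_luid
     were { 0, 4, 0, 0, 7, 0 } after deletions, the first loop copies the
     first nonzero luid (4) into slot 0, and the second loop then
     propagates each preceding value into the zero slots, giving
     { 4, 4, 4, 4, 7, 7 }.  */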
896
 897   /* Determine if the function has an indirect jump.  On some systems
898 this prevents low overhead loop instructions from being used. */
899 indirect_jump_in_function = indirect_jump_in_function_p (f);
900
901 /* Now scan the loops, last ones first, since this means inner ones are done
902 before outer ones. */
903 for (i = max_loop_num - 1; i >= 0; i--)
904 {
905 struct loop *loop = &loops->array[i];
906
907 if (! loop->invalid && loop->end)
908 {
909 scan_loop (loop, flags);
910 ggc_collect ();
911 }
912 }
913
914 end_alias_analysis ();
915
916 /* Clean up. */
917 for (i = 0; i < (int) loops->num; i++)
918 free (loops_info[i].mems);
919
920 free (uid_luid);
921 free (uid_loop);
922 free (loops_info);
923 free (loops->array);
924 }
925 \f
 926 /* Returns the next insn, in execution order, after INSN.  LOOP->START and
 927    LOOP->END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
928 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
929 insn-stream; it is used with loops that are entered near the
930 bottom. */
931
932 static rtx
933 next_insn_in_loop (const struct loop *loop, rtx insn)
934 {
935 insn = NEXT_INSN (insn);
936
937 if (insn == loop->end)
938 {
939 if (loop->top)
940 /* Go to the top of the loop, and continue there. */
941 insn = loop->top;
942 else
943 /* We're done. */
944 insn = NULL_RTX;
945 }
946
947 if (insn == loop->scan_start)
948 /* We're done. */
949 insn = NULL_RTX;
950
951 return insn;
952 }
953
954 /* Find any register references hidden inside X and add them to
 955    the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
 956    when checking whether a PARALLEL can be pulled out of a loop.  */
957
958 static rtx
959 find_regs_nested (rtx deps, rtx x)
960 {
961 enum rtx_code code = GET_CODE (x);
962 if (code == REG)
963 deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
964 else
965 {
966 const char *fmt = GET_RTX_FORMAT (code);
967 int i, j;
968 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
969 {
970 if (fmt[i] == 'e')
971 deps = find_regs_nested (deps, XEXP (x, i));
972 else if (fmt[i] == 'E')
973 for (j = 0; j < XVECLEN (x, i); j++)
974 deps = find_regs_nested (deps, XVECEXP (x, i, j));
975 }
976 }
977 return deps;
978 }
979
980 /* Optimize one loop described by LOOP. */
981
982 /* ??? Could also move memory writes out of loops if the destination address
983 is invariant, the source is invariant, the memory write is not volatile,
984 and if we can prove that no read inside the loop can read this address
985 before the write occurs. If there is a read of this address after the
986 write, then we can also mark the memory read as invariant. */
987
988 static void
989 scan_loop (struct loop *loop, int flags)
990 {
991 struct loop_info *loop_info = LOOP_INFO (loop);
992 struct loop_regs *regs = LOOP_REGS (loop);
993 int i;
994 rtx loop_start = loop->start;
995 rtx loop_end = loop->end;
996 rtx p;
997 /* 1 if we are scanning insns that could be executed zero times. */
998 int maybe_never = 0;
999 /* 1 if we are scanning insns that might never be executed
1000 due to a subroutine call which might exit before they are reached. */
1001 int call_passed = 0;
1002 /* Number of insns in the loop. */
1003 int insn_count;
1004 int tem;
1005 rtx temp, update_start, update_end;
1006 /* The SET from an insn, if it is the only SET in the insn. */
1007 rtx set, set1;
1008 /* Chain describing insns movable in current loop. */
1009 struct loop_movables *movables = LOOP_MOVABLES (loop);
1010 /* Ratio of extra register life span we can justify
1011 for saving an instruction. More if loop doesn't call subroutines
1012 since in that case saving an insn makes more difference
1013 and more registers are available. */
1014 int threshold;
1015 int in_libcall;
1016
1017 loop->top = 0;
1018
1019 movables->head = 0;
1020 movables->last = 0;
1021
1022 /* Determine whether this loop starts with a jump down to a test at
1023 the end. This will occur for a small number of loops with a test
1024 that is too complex to duplicate in front of the loop.
1025
1026 We search for the first insn or label in the loop, skipping NOTEs.
1027 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
1028 (because we might have a loop executed only once that contains a
1029 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
1030 (in case we have a degenerate loop).
1031
1032 Note that if we mistakenly think that a loop is entered at the top
1033 when, in fact, it is entered at the exit test, the only effect will be
1034 slightly poorer optimization. Making the opposite error can generate
1035 incorrect code. Since very few loops now start with a jump to the
1036 exit test, the code here to detect that case is very conservative. */
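
     /* Schematically (an illustration only, not lifted from a real RTL
	dump), such a loop is laid out as

	   NOTE_INSN_LOOP_BEG
	     jump to TEST
	   TOP:   ...loop body...
	   TEST:  ...exit test...
	     conditional jump to TOP
	   NOTE_INSN_LOOP_END

	so the first real insn after the loop note is the unconditional
	jump, and the scan must start at its target rather than at the top
	of the body.  */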
1037
1038 for (p = NEXT_INSN (loop_start);
1039 p != loop_end
1040 && !LABEL_P (p) && ! INSN_P (p)
1041 && (!NOTE_P (p)
1042 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
1043 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
1044 p = NEXT_INSN (p))
1045 ;
1046
1047 loop->scan_start = p;
1048
1049 /* If loop end is the end of the current function, then emit a
1050 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
1051 note insn. This is the position we use when sinking insns out of
1052 the loop. */
1053 if (NEXT_INSN (loop->end) != 0)
1054 loop->sink = NEXT_INSN (loop->end);
1055 else
1056 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
1057
1058 /* Set up variables describing this loop. */
1059 prescan_loop (loop);
1060 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
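
  /* A made-up illustration (n_non_fixed_regs is target-dependent): with
     30 non-fixed registers, a loop without calls gets
     threshold = 2 * (1 + 30) = 62, while one containing a call gets only
     1 * (1 + 30) = 31; i.e. a call-free loop is allowed twice the extra
     register life span per saved instruction.  */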
1061
1062 /* If loop has a jump before the first label,
1063 the true entry is the target of that jump.
1064 Start scan from there.
1065 But record in LOOP->TOP the place where the end-test jumps
1066 back to so we can scan that after the end of the loop. */
1067 if (JUMP_P (p)
1068 /* Loop entry must be unconditional jump (and not a RETURN) */
1069 && any_uncondjump_p (p)
1070 && JUMP_LABEL (p) != 0
1071 /* Check to see whether the jump actually
1072 jumps out of the loop (meaning it's no loop).
1073 This case can happen for things like
1074 do {..} while (0). If this label was generated previously
1075 by loop, we can't tell anything about it and have to reject
1076 the loop. */
1077 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
1078 {
1079 loop->top = next_label (loop->scan_start);
1080 loop->scan_start = JUMP_LABEL (p);
1081 }
1082
1083 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
1084 as required by loop_reg_used_before_p. So skip such loops. (This
1085 test may never be true, but it's best to play it safe.)
1086
1087 Also, skip loops where we do not start scanning at a label. This
1088 test also rejects loops starting with a JUMP_INSN that failed the
1089 test above. */
1090
1091 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
1092 || !LABEL_P (loop->scan_start))
1093 {
1094 if (loop_dump_stream)
1095 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
1096 INSN_UID (loop_start), INSN_UID (loop_end));
1097 return;
1098 }
1099
1100 /* Allocate extra space for REGs that might be created by load_mems.
1101 We allocate a little extra slop as well, in the hopes that we
1102 won't have to reallocate the regs array. */
1103 loop_regs_scan (loop, loop_info->mems_idx + 16);
1104 insn_count = count_insns_in_loop (loop);
1105
1106 if (loop_dump_stream)
1107 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
1108 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
1109
1110 /* Scan through the loop finding insns that are safe to move.
1111 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
1112 this reg will be considered invariant for subsequent insns.
1113 We consider whether subsequent insns use the reg
1114 in deciding whether it is worth actually moving.
1115
1116 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
1117 and therefore it is possible that the insns we are scanning
1118 would never be executed. At such times, we must make sure
1119 that it is safe to execute the insn once instead of zero times.
1120 When MAYBE_NEVER is 0, all insns will be executed at least once
1121 so that is not a problem. */
1122
1123 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
1124 p != NULL_RTX;
1125 p = next_insn_in_loop (loop, p))
1126 {
1127 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
1128 in_libcall--;
1129 if (NONJUMP_INSN_P (p))
1130 {
1131 /* Do not scan past an optimization barrier. */
1132 if (GET_CODE (PATTERN (p)) == ASM_INPUT)
1133 break;
1134 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
1135 if (temp)
1136 in_libcall++;
1137 if (! in_libcall
1138 && (set = single_set (p))
1139 && REG_P (SET_DEST (set))
1140 && SET_DEST (set) != frame_pointer_rtx
1141 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1142 && SET_DEST (set) != pic_offset_table_rtx
1143 #endif
1144 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
1145 {
1146 int tem1 = 0;
1147 int tem2 = 0;
1148 int move_insn = 0;
1149 int insert_temp = 0;
1150 rtx src = SET_SRC (set);
1151 rtx dependencies = 0;
1152
1153 /* Figure out what to use as a source of this insn. If a
1154 REG_EQUIV note is given or if a REG_EQUAL note with a
1155 constant operand is specified, use it as the source and
1156 mark that we should move this insn by calling
1157 emit_move_insn rather that duplicating the insn.
1158
1159 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
1160 note is present. */
1161 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1162 if (temp)
1163 src = XEXP (temp, 0), move_insn = 1;
1164 else
1165 {
1166 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1167 if (temp && CONSTANT_P (XEXP (temp, 0)))
1168 src = XEXP (temp, 0), move_insn = 1;
1169 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
1170 {
1171 src = XEXP (temp, 0);
1172 /* A libcall block can use regs that don't appear in
1173 the equivalent expression. To move the libcall,
1174 we must move those regs too. */
1175 dependencies = libcall_other_reg (p, src);
1176 }
1177 }
1178
1179 /* For parallels, add any possible uses to the dependencies, as
1180 we can't move the insn without resolving them first.
1181 MEMs inside CLOBBERs may also reference registers; these
1182 count as implicit uses. */
1183 if (GET_CODE (PATTERN (p)) == PARALLEL)
1184 {
1185 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
1186 {
1187 rtx x = XVECEXP (PATTERN (p), 0, i);
1188 if (GET_CODE (x) == USE)
1189 dependencies
1190 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
1191 dependencies);
1192 else if (GET_CODE (x) == CLOBBER
1193 && MEM_P (XEXP (x, 0)))
1194 dependencies = find_regs_nested (dependencies,
1195 XEXP (XEXP (x, 0), 0));
1196 }
1197 }
1198
1199 if (/* The register is used in basic blocks other
1200 than the one where it is set (meaning that
1201 something after this point in the loop might
1202 depend on its value before the set). */
1203 ! reg_in_basic_block_p (p, SET_DEST (set))
1204 /* And the set is not guaranteed to be executed once
1205 the loop starts, or the value before the set is
1206 needed before the set occurs...
1207
1208 ??? Note we have quadratic behavior here, mitigated
1209 by the fact that the previous test will often fail for
1210 large loops. Rather than re-scanning the entire loop
1211 each time for register usage, we should build tables
1212 of the register usage and use them here instead. */
1213 && (maybe_never
1214 || loop_reg_used_before_p (loop, set, p)))
1215 /* It is unsafe to move the set. However, it may be OK to
1216 move the source into a new pseudo, and substitute a
1217 reg-to-reg copy for the original insn.
1218
1219 This code used to consider it OK to move a set of a variable
1220 which was not created by the user and not used in an exit
1221 test.
1222 That behavior is incorrect and was removed. */
1223 insert_temp = 1;
1224
1225 /* Don't try to optimize a MODE_CC set with a constant
1226 source. It probably will be combined with a conditional
1227 jump. */
1228 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
1229 && CONSTANT_P (src))
1230 ;
1231 /* Don't try to optimize a register that was made
1232 by loop-optimization for an inner loop.
1233 We don't know its life-span, so we can't compute
1234 the benefit. */
1235 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
1236 ;
1237 /* Don't move the source and add a reg-to-reg copy:
1238 - with -Os (this certainly increases size),
1239 - if the mode doesn't support copy operations (obviously),
1240 - if the source is already a reg (the motion will gain nothing),
1241 - if the source is a legitimate constant (likewise). */
1242 else if (insert_temp
1243 && (optimize_size
1244 || ! can_copy_p (GET_MODE (SET_SRC (set)))
1245 || REG_P (SET_SRC (set))
1246 || (CONSTANT_P (SET_SRC (set))
1247 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
1248 ;
1249 else if ((tem = loop_invariant_p (loop, src))
1250 && (dependencies == 0
1251 || (tem2
1252 = loop_invariant_p (loop, dependencies)) != 0)
1253 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
1254 || (tem1
1255 = consec_sets_invariant_p
1256 (loop, SET_DEST (set),
1257 regs->array[REGNO (SET_DEST (set))].set_in_loop,
1258 p)))
1259 /* If the insn can cause a trap (such as divide by zero),
1260 can't move it unless it's guaranteed to be executed
1261 once loop is entered. Even a function call might
1262 prevent the trap insn from being reached
1263 (since it might exit!) */
1264 && ! ((maybe_never || call_passed)
1265 && may_trap_p (src)))
1266 {
1267 struct movable *m;
1268 int regno = REGNO (SET_DEST (set));
1269
 1270 	      /* A potential lossage is a case where two insns
1271 can be combined as long as they are both in the loop, but
1272 we move one of them outside the loop. For large loops,
1273 this can lose. The most common case of this is the address
1274 of a function being called.
1275
1276 Therefore, if this register is marked as being used
1277 exactly once if we are in a loop with calls
1278 (a "large loop"), see if we can replace the usage of
1279 this register with the source of this SET. If we can,
1280 delete this insn.
1281
1282 Don't do this if P has a REG_RETVAL note or if we have
1283 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
1284
1285 if (loop_info->has_call
1286 && regs->array[regno].single_usage != 0
1287 && regs->array[regno].single_usage != const0_rtx
1288 && REGNO_FIRST_UID (regno) == INSN_UID (p)
1289 && (REGNO_LAST_UID (regno)
1290 == INSN_UID (regs->array[regno].single_usage))
1291 && regs->array[regno].set_in_loop == 1
1292 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
1293 && ! side_effects_p (SET_SRC (set))
1294 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
1295 && (! SMALL_REGISTER_CLASSES
1296 || (! (REG_P (SET_SRC (set))
1297 && (REGNO (SET_SRC (set))
1298 < FIRST_PSEUDO_REGISTER))))
1299 && regno >= FIRST_PSEUDO_REGISTER
1300 /* This test is not redundant; SET_SRC (set) might be
1301 a call-clobbered register and the life of REGNO
1302 might span a call. */
1303 && ! modified_between_p (SET_SRC (set), p,
1304 regs->array[regno].single_usage)
1305 && no_labels_between_p (p,
1306 regs->array[regno].single_usage)
1307 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
1308 regs->array[regno].single_usage))
1309 {
1310 /* Replace any usage in a REG_EQUAL note. Must copy
1311 the new source, so that we don't get rtx sharing
1312 between the SET_SOURCE and REG_NOTES of insn p. */
1313 REG_NOTES (regs->array[regno].single_usage)
1314 = (replace_rtx
1315 (REG_NOTES (regs->array[regno].single_usage),
1316 SET_DEST (set), copy_rtx (SET_SRC (set))));
1317
1318 delete_insn (p);
1319 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1320 i++)
1321 regs->array[regno+i].set_in_loop = 0;
1322 continue;
1323 }
1324
1325 m = xmalloc (sizeof (struct movable));
1326 m->next = 0;
1327 m->insn = p;
1328 m->set_src = src;
1329 m->dependencies = dependencies;
1330 m->set_dest = SET_DEST (set);
1331 m->force = 0;
1332 m->consec
1333 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
1334 m->done = 0;
1335 m->forces = 0;
1336 m->partial = 0;
1337 m->move_insn = move_insn;
1338 m->move_insn_first = 0;
1339 m->insert_temp = insert_temp;
1340 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1341 m->savemode = VOIDmode;
1342 m->regno = regno;
1343 /* Set M->cond if either loop_invariant_p
1344 or consec_sets_invariant_p returned 2
1345 (only conditionally invariant). */
1346 m->cond = ((tem | tem1 | tem2) > 1);
1347 m->global = LOOP_REG_GLOBAL_P (loop, regno);
1348 m->match = 0;
1349 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1350 m->savings = regs->array[regno].n_times_set;
1351 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
1352 m->savings += libcall_benefit (p);
1353 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1354 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
1355 /* Add M to the end of the chain MOVABLES. */
1356 loop_movables_add (movables, m);
1357
1358 if (m->consec > 0)
1359 {
1360 /* It is possible for the first instruction to have a
1361 REG_EQUAL note but a non-invariant SET_SRC, so we must
1362 remember the status of the first instruction in case
1363 the last instruction doesn't have a REG_EQUAL note. */
1364 m->move_insn_first = m->move_insn;
1365
1366 /* Skip this insn, not checking REG_LIBCALL notes. */
1367 p = next_nonnote_insn (p);
1368 /* Skip the consecutive insns, if there are any. */
1369 p = skip_consec_insns (p, m->consec);
1370 /* Back up to the last insn of the consecutive group. */
1371 p = prev_nonnote_insn (p);
1372
1373 /* We must now reset m->move_insn, m->is_equiv, and
1374 possibly m->set_src to correspond to the effects of
1375 all the insns. */
1376 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1377 if (temp)
1378 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1379 else
1380 {
1381 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1382 if (temp && CONSTANT_P (XEXP (temp, 0)))
1383 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1384 else
1385 m->move_insn = 0;
1386
1387 }
1388 m->is_equiv
1389 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1390 }
1391 }
1392 /* If this register is always set within a STRICT_LOW_PART
1393 or set to zero, then its high bytes are constant.
1394 So clear them outside the loop and within the loop
1395 just load the low bytes.
1396 We must check that the machine has an instruction to do so.
1397 Also, if the value loaded into the register
1398 depends on the same register, this cannot be done. */
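	  /* Schematically, the pair of insns recognized here looks like
	     (a sketch only; the modes and the source of the second set
	     will vary):

		(set (reg:SI R) (const_int 0))
		(set (strict_low_part (subreg:HI (reg:SI R) 0)) ...)

	     which together zero-extend a narrower value into R, so the
	     clearing of R can be hoisted out of the loop.  */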
1399 else if (SET_SRC (set) == const0_rtx
1400 && NONJUMP_INSN_P (NEXT_INSN (p))
1401 && (set1 = single_set (NEXT_INSN (p)))
1402 && GET_CODE (set1) == SET
1403 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1404 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1405 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1406 == SET_DEST (set))
1407 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1408 {
1409 int regno = REGNO (SET_DEST (set));
1410 if (regs->array[regno].set_in_loop == 2)
1411 {
1412 struct movable *m;
1413 m = xmalloc (sizeof (struct movable));
1414 m->next = 0;
1415 m->insn = p;
1416 m->set_dest = SET_DEST (set);
1417 m->dependencies = 0;
1418 m->force = 0;
1419 m->consec = 0;
1420 m->done = 0;
1421 m->forces = 0;
1422 m->move_insn = 0;
1423 m->move_insn_first = 0;
1424 m->insert_temp = insert_temp;
1425 m->partial = 1;
1426 /* If the insn may not be executed on some cycles,
1427 we can't clear the whole reg; clear just high part.
1428 Not even if the reg is used only within this loop.
1429 Consider this:
1430 while (1)
1431 while (s != t) {
1432 if (foo ()) x = *s;
1433 use (x);
1434 }
1435 Clearing x before the inner loop could clobber a value
1436 being saved from the last time around the outer loop.
1437 However, if the reg is not used outside this loop
1438 and all uses of the register are in the same
1439 basic block as the store, there is no problem.
1440
1441 If this insn was made by loop, we don't know its
1442 INSN_LUID and hence must make a conservative
1443 assumption. */
1444 m->global = (INSN_UID (p) >= max_uid_for_loop
1445 || LOOP_REG_GLOBAL_P (loop, regno)
1446 || (labels_in_range_p
1447 (p, REGNO_FIRST_LUID (regno))));
1448 if (maybe_never && m->global)
1449 m->savemode = GET_MODE (SET_SRC (set1));
1450 else
1451 m->savemode = VOIDmode;
1452 m->regno = regno;
1453 m->cond = 0;
1454 m->match = 0;
1455 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1456 m->savings = 1;
1457 for (i = 0;
1458 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1459 i++)
1460 regs->array[regno+i].set_in_loop = -1;
1461 /* Add M to the end of the chain MOVABLES. */
1462 loop_movables_add (movables, m);
1463 }
1464 }
1465 }
1466 }
1467 /* Past a call insn, we get to insns which might not be executed
1468 because the call might exit. This matters for insns that trap.
1469 Constant and pure call insns always return, so they don't count. */
1470 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1471 call_passed = 1;
1472 /* Past a label or a jump, we get to insns for which we
1473 can't count on whether or how many times they will be
1474 executed during each iteration. Therefore, we can
1475 only move out sets of trivial variables
1476 (those not used after the loop). */
1477 /* Similar code appears twice in strength_reduce. */
1478 else if ((LABEL_P (p) || JUMP_P (p))
1479 /* If we enter the loop in the middle, and scan around to the
1480 beginning, don't set maybe_never for that. This must be an
1481 unconditional jump, otherwise the code at the top of the
1482 loop might never be executed. Unconditional jumps are
1483 followed by a barrier then the loop_end. */
1484 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1485 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1486 && any_uncondjump_p (p)))
1487 maybe_never = 1;
1488 }
1489
1490 /* If one movable subsumes another, ignore that other. */
1491
1492 ignore_some_movables (movables);
1493
1494 /* For each movable insn, see if the reg that it loads
1495 leads, when it dies, right into another conditionally movable insn.
1496 If so, record that the second insn "forces" the first one,
1497 since the second can be moved only if the first is. */
1498
1499 force_movables (movables);
1500
1501 /* See if there are multiple movable insns that load the same value.
1502 If there are, make all but the first point at the first one
1503 through the `match' field, and add the priorities of them
1504 all together as the priority of the first. */
1505
1506 combine_movables (movables, regs);
1507
1508 /* Now consider each movable insn to decide whether it is worth moving.
1509 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1510
1511 For machines with few registers this increases code size, so do not
1512 move movables when optimizing for code size on such machines.
1513 (The 18 below is the value for i386.) */
1514
1515 if (!optimize_size
1516 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1517 {
1518 move_movables (loop, movables, threshold, insn_count);
1519
1520 /* Recalculate regs->array if move_movables has created new
1521 registers. */
1522 if (max_reg_num () > regs->num)
1523 {
1524 loop_regs_scan (loop, 0);
1525 for (update_start = loop_start;
1526 PREV_INSN (update_start)
1527 && !LABEL_P (PREV_INSN (update_start));
1528 update_start = PREV_INSN (update_start))
1529 ;
1530 update_end = NEXT_INSN (loop_end);
1531
1532 reg_scan_update (update_start, update_end, loop_max_reg);
1533 loop_max_reg = max_reg_num ();
1534 }
1535 }
1536
1537 /* Candidates whose set_in_loop is still negative are those that were not moved.
1538 Restore regs->array[I].set_in_loop to indicate that they are not actually
1539 invariant. */
1540 for (i = 0; i < regs->num; i++)
1541 if (regs->array[i].set_in_loop < 0)
1542 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1543
1544 /* Now that we've moved some things out of the loop, we might be able to
1545 hoist even more memory references. */
1546 load_mems (loop);
1547
1548 /* Recalculate regs->array if load_mems has created new registers. */
1549 if (max_reg_num () > regs->num)
1550 loop_regs_scan (loop, 0);
1551
1552 for (update_start = loop_start;
1553 PREV_INSN (update_start)
1554 && !LABEL_P (PREV_INSN (update_start));
1555 update_start = PREV_INSN (update_start))
1556 ;
1557 update_end = NEXT_INSN (loop_end);
1558
1559 reg_scan_update (update_start, update_end, loop_max_reg);
1560 loop_max_reg = max_reg_num ();
1561
1562 if (flag_strength_reduce)
1563 {
1564 if (update_end && LABEL_P (update_end))
1565 /* Ensure our label doesn't go away. */
1566 LABEL_NUSES (update_end)++;
1567
1568 strength_reduce (loop, flags);
1569
1570 reg_scan_update (update_start, update_end, loop_max_reg);
1571 loop_max_reg = max_reg_num ();
1572
1573 if (update_end && LABEL_P (update_end)
1574 && --LABEL_NUSES (update_end) == 0)
1575 delete_related_insns (update_end);
1576 }
1577
1578
1579 /* The movable information was needed by strength reduction; free it now. */
1580 loop_movables_free (movables);
1581
1582 free (regs->array);
1583 regs->array = 0;
1584 regs->num = 0;
1585 }
1586 \f
1587 /* Add elements to *OUTPUT to record all the pseudo-regs
1588 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1589
1590 static void
1591 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1592 {
1593 enum rtx_code code;
1594 const char *fmt;
1595 int i;
1596
1597 code = GET_CODE (in_this);
1598
1599 switch (code)
1600 {
1601 case PC:
1602 case CC0:
1603 case CONST_INT:
1604 case CONST_DOUBLE:
1605 case CONST:
1606 case SYMBOL_REF:
1607 case LABEL_REF:
1608 return;
1609
1610 case REG:
1611 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1612 && ! reg_mentioned_p (in_this, not_in_this))
1613 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1614 return;
1615
1616 default:
1617 break;
1618 }
1619
1620 fmt = GET_RTX_FORMAT (code);
1621 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1622 {
1623 int j;
1624
1625 switch (fmt[i])
1626 {
1627 case 'E':
1628 for (j = 0; j < XVECLEN (in_this, i); j++)
1629 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1630 break;
1631
1632 case 'e':
1633 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1634 break;
1635 }
1636 }
1637 }
1638 \f
1639 /* Check what regs are referred to in the libcall block ending with INSN,
1640 aside from those mentioned in the equivalent value.
1641 If there are none, return 0.
1642 If there are one or more, return an EXPR_LIST containing all of them. */
1643
1644 static rtx
1645 libcall_other_reg (rtx insn, rtx equiv)
1646 {
1647 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1648 rtx p = XEXP (note, 0);
1649 rtx output = 0;
1650
1651 /* First, find all the regs used in the libcall block
1652 that are not mentioned as inputs to the result. */
1653
1654 while (p != insn)
1655 {
1656 if (INSN_P (p))
1657 record_excess_regs (PATTERN (p), equiv, &output);
1658 p = NEXT_INSN (p);
1659 }
1660
1661 return output;
1662 }
1663 \f
1664 /* Return 1 if all uses of REG
1665 are between INSN and the end of the basic block. */
1666
1667 static int
1668 reg_in_basic_block_p (rtx insn, rtx reg)
1669 {
1670 int regno = REGNO (reg);
1671 rtx p;
1672
1673 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1674 return 0;
1675
1676 /* Search this basic block for the already recorded last use of the reg. */
1677 for (p = insn; p; p = NEXT_INSN (p))
1678 {
1679 switch (GET_CODE (p))
1680 {
1681 case NOTE:
1682 break;
1683
1684 case INSN:
1685 case CALL_INSN:
1686 /* Ordinary insn: if this is the last use, we win. */
1687 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1688 return 1;
1689 break;
1690
1691 case JUMP_INSN:
1692 /* Jump insn: if this is the last use, we win. */
1693 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1694 return 1;
1695 /* Otherwise, it's the end of the basic block, so we lose. */
1696 return 0;
1697
1698 case CODE_LABEL:
1699 case BARRIER:
1700 /* It's the end of the basic block, so we lose. */
1701 return 0;
1702
1703 default:
1704 break;
1705 }
1706 }
1707
1708 /* The "last use" that was recorded can't be found after the first
1709 use. This can happen when the last use was deleted while
1710 processing an inner loop, this inner loop was then completely
1711 unrolled, and the outer loop is always exited after the inner loop,
1712 so that everything after the first use becomes a single basic block. */
1713 return 1;
1714 }
1715 \f
1716 /* Compute the benefit of eliminating the insns in the block whose
1717 last insn is LAST. This may be a group of insns used to compute a
1718 value directly or can contain a library call. */
1719
1720 static int
1721 libcall_benefit (rtx last)
1722 {
1723 rtx insn;
1724 int benefit = 0;
1725
1726 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1727 insn != last; insn = NEXT_INSN (insn))
1728 {
1729 if (CALL_P (insn))
1730 benefit += 10; /* Assume at least this many insns in a library
1731 routine. */
1732 else if (NONJUMP_INSN_P (insn)
1733 && GET_CODE (PATTERN (insn)) != USE
1734 && GET_CODE (PATTERN (insn)) != CLOBBER)
1735 benefit++;
1736 }
1737
1738 return benefit;
1739 }
1740 \f
1741 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1742
1743 static rtx
1744 skip_consec_insns (rtx insn, int count)
1745 {
1746 for (; count > 0; count--)
1747 {
1748 rtx temp;
1749
1750 /* If first insn of libcall sequence, skip to end. */
1751 /* Do this at start of loop, since INSN is guaranteed to
1752 be an insn here. */
1753 if (!NOTE_P (insn)
1754 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1755 insn = XEXP (temp, 0);
1756
1757 do
1758 insn = NEXT_INSN (insn);
1759 while (NOTE_P (insn));
1760 }
1761
1762 return insn;
1763 }
1764
1765 /* Ignore any movable whose insn falls within a libcall
1766 which is part of another movable.
1767 We make use of the fact that the movable for the libcall value
1768 was made later and so appears later on the chain. */
1769
1770 static void
1771 ignore_some_movables (struct loop_movables *movables)
1772 {
1773 struct movable *m, *m1;
1774
1775 for (m = movables->head; m; m = m->next)
1776 {
1777 /* Is this a movable for the value of a libcall? */
1778 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1779 if (note)
1780 {
1781 rtx insn;
1782 /* Check for earlier movables inside that range,
1783 and mark them invalid. We cannot use LUIDs here because
1784 insns created by loop.c for prior loops don't have LUIDs.
1785 Rather than reject all such insns from movables, we just
1786 explicitly check each insn in the libcall (since invariant
1787 libcalls aren't that common). */
1788 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1789 for (m1 = movables->head; m1 != m; m1 = m1->next)
1790 if (m1->insn == insn)
1791 m1->done = 1;
1792 }
1793 }
1794 }
1795
1796 /* For each movable insn, see if the reg that it loads
1797 leads, when it dies, right into another conditionally movable insn.
1798 If so, record that the second insn "forces" the first one,
1799 since the second can be moved only if the first is. */
1800
1801 static void
1802 force_movables (struct loop_movables *movables)
1803 {
1804 struct movable *m, *m1;
1805
1806 for (m1 = movables->head; m1; m1 = m1->next)
1807 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1808 if (!m1->partial && !m1->done)
1809 {
1810 int regno = m1->regno;
1811 for (m = m1->next; m; m = m->next)
1812 /* ??? Could this be a bug? What if CSE caused the
1813 register of M1 to be used after this insn?
1814 Since CSE does not update regno_last_uid,
1815 this insn M->insn might not be where it dies.
1816 But very likely this doesn't matter; what matters is
1817 that M's reg is computed from M1's reg. */
1818 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1819 && !m->done)
1820 break;
1821 if (m != 0 && m->set_src == m1->set_dest
1822 /* If m->consec, m->set_src isn't valid. */
1823 && m->consec == 0)
1824 m = 0;
1825
1826 /* Increase the priority of moving the first insn
1827 since it permits the second to be moved as well.
1828 Likewise for insns already forced by the first insn. */
1829 if (m != 0)
1830 {
1831 struct movable *m2;
1832
1833 m->forces = m1;
1834 for (m2 = m1; m2; m2 = m2->forces)
1835 {
1836 m2->lifetime += m->lifetime;
1837 m2->savings += m->savings;
1838 }
1839 }
1840 }
1841 }
1842 \f
1843 /* Find invariant expressions that are equal and can be combined into
1844 one register. */
1845
1846 static void
1847 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1848 {
1849 struct movable *m;
1850 char *matched_regs = xmalloc (regs->num);
1851 enum machine_mode mode;
1852
1853 /* Regs that are set more than once are not allowed to match
1854 or be matched. I'm no longer sure why not. */
1855 /* Only pseudo registers are allowed to match or be matched,
1856 since move_movables does not validate the change. */
1857 /* Perhaps testing m->consec_sets would be more appropriate here? */
1858
1859 for (m = movables->head; m; m = m->next)
1860 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1861 && m->regno >= FIRST_PSEUDO_REGISTER
1862 && !m->insert_temp
1863 && !m->partial)
1864 {
1865 struct movable *m1;
1866 int regno = m->regno;
1867
1868 memset (matched_regs, 0, regs->num);
1869 matched_regs[regno] = 1;
1870
1871 /* We want later insns to match the first one. Don't make the first
1872 one match any later ones. So start this loop at m->next. */
1873 for (m1 = m->next; m1; m1 = m1->next)
1874 if (m != m1 && m1->match == 0
1875 && !m1->insert_temp
1876 && regs->array[m1->regno].n_times_set == 1
1877 && m1->regno >= FIRST_PSEUDO_REGISTER
1878 /* A reg used outside the loop mustn't be eliminated. */
1879 && !m1->global
1880 /* A reg used for zero-extending mustn't be eliminated. */
1881 && !m1->partial
1882 && (matched_regs[m1->regno]
1883 ||
1884 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1885 /* See if the source of M1 says it matches M. */
1886 && ((REG_P (m1->set_src)
1887 && matched_regs[REGNO (m1->set_src)])
1888 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1889 movables, regs))))
1890 && ((m->dependencies == m1->dependencies)
1891 || rtx_equal_p (m->dependencies, m1->dependencies)))
1892 {
1893 m->lifetime += m1->lifetime;
1894 m->savings += m1->savings;
1895 m1->done = 1;
1896 m1->match = m;
1897 matched_regs[m1->regno] = 1;
1898 }
1899 }
1900
1901 /* Now combine the regs used for zero-extension.
1902 This can be done for those not marked `global'
1903 provided their lives don't overlap. */
1904
1905 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1906 mode = GET_MODE_WIDER_MODE (mode))
1907 {
1908 struct movable *m0 = 0;
1909
1910 /* Combine all the registers for extension from mode MODE.
1911 Don't combine any that are used outside this loop. */
1912 for (m = movables->head; m; m = m->next)
1913 if (m->partial && ! m->global
1914 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1915 {
1916 struct movable *m1;
1917
1918 int first = REGNO_FIRST_LUID (m->regno);
1919 int last = REGNO_LAST_LUID (m->regno);
1920
1921 if (m0 == 0)
1922 {
1923 /* First one: don't check for overlap, just record it. */
1924 m0 = m;
1925 continue;
1926 }
1927
1928 /* Make sure they extend to the same mode.
1929 (Almost always true.) */
1930 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1931 continue;
1932
1933 /* We already have one: check for overlap with those
1934 already combined together. */
1935 for (m1 = movables->head; m1 != m; m1 = m1->next)
1936 if (m1 == m0 || (m1->partial && m1->match == m0))
1937 if (! (REGNO_FIRST_LUID (m1->regno) > last
1938 || REGNO_LAST_LUID (m1->regno) < first))
1939 goto overlap;
1940
1941 /* No overlap: we can combine this with the others. */
1942 m0->lifetime += m->lifetime;
1943 m0->savings += m->savings;
1944 m->done = 1;
1945 m->match = m0;
1946
1947 overlap:
1948 ;
1949 }
1950 }
1951
1952 /* Clean up. */
1953 free (matched_regs);
1954 }
1955
1956 /* Returns the number of movable instructions in LOOP that were not
1957 moved outside the loop. */
1958
1959 static int
1960 num_unmoved_movables (const struct loop *loop)
1961 {
1962 int num = 0;
1963 struct movable *m;
1964
1965 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1966 if (!m->done)
1967 ++num;
1968
1969 return num;
1970 }
1971
1972 \f
1973 /* Return 1 if regs X and Y will become the same if moved. */
1974
1975 static int
1976 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1977 {
1978 unsigned int xn = REGNO (x);
1979 unsigned int yn = REGNO (y);
1980 struct movable *mx, *my;
1981
1982 for (mx = movables->head; mx; mx = mx->next)
1983 if (mx->regno == xn)
1984 break;
1985
1986 for (my = movables->head; my; my = my->next)
1987 if (my->regno == yn)
1988 break;
1989
1990 return (mx && my
1991 && ((mx->match == my->match && mx->match != 0)
1992 || mx->match == my
1993 || mx == my->match));
1994 }
1995
1996 /* Return 1 if X and Y are identical-looking rtx's.
1997 This is the Lisp function EQUAL for rtx arguments.
1998
1999 If two registers are matching movables or a movable register and an
2000 equivalent constant, consider them equal. */
2001
2002 static int
2003 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2004 struct loop_regs *regs)
2005 {
2006 int i;
2007 int j;
2008 struct movable *m;
2009 enum rtx_code code;
2010 const char *fmt;
2011
2012 if (x == y)
2013 return 1;
2014 if (x == 0 || y == 0)
2015 return 0;
2016
2017 code = GET_CODE (x);
2018
2019 /* If we have a register and a constant, they may sometimes be
2020 equal. */
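/* (A reg whose set_in_loop is -2 is set by a movable that will be
   re-emitted as a move from m->set_src, so it can be compared against
   that value.)  */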
2021 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2022 && CONSTANT_P (y))
2023 {
2024 for (m = movables->head; m; m = m->next)
2025 if (m->move_insn && m->regno == REGNO (x)
2026 && rtx_equal_p (m->set_src, y))
2027 return 1;
2028 }
2029 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2030 && CONSTANT_P (x))
2031 {
2032 for (m = movables->head; m; m = m->next)
2033 if (m->move_insn && m->regno == REGNO (y)
2034 && rtx_equal_p (m->set_src, x))
2035 return 1;
2036 }
2037
2038 /* Otherwise, rtx's of different codes cannot be equal. */
2039 if (code != GET_CODE (y))
2040 return 0;
2041
2042 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2043 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2044
2045 if (GET_MODE (x) != GET_MODE (y))
2046 return 0;
2047
2048 /* These types of rtx's can be compared nonrecursively. */
2049 switch (code)
2050 {
2051 case PC:
2052 case CC0:
2053 case CONST_INT:
2054 case CONST_DOUBLE:
2055 return 0;
2056
2057 case REG:
2058 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2059
2060 case LABEL_REF:
2061 return XEXP (x, 0) == XEXP (y, 0);
2062 case SYMBOL_REF:
2063 return XSTR (x, 0) == XSTR (y, 0);
2064
2065 default:
2066 break;
2067 }
2068
2069 /* Compare the elements. If any pair of corresponding elements
2070 fails to match, return 0 for the whole thing. */
2071
2072 fmt = GET_RTX_FORMAT (code);
2073 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2074 {
2075 switch (fmt[i])
2076 {
2077 case 'w':
2078 if (XWINT (x, i) != XWINT (y, i))
2079 return 0;
2080 break;
2081
2082 case 'i':
2083 if (XINT (x, i) != XINT (y, i))
2084 return 0;
2085 break;
2086
2087 case 'E':
2088 /* Two vectors must have the same length. */
2089 if (XVECLEN (x, i) != XVECLEN (y, i))
2090 return 0;
2091
2092 /* And the corresponding elements must match. */
2093 for (j = 0; j < XVECLEN (x, i); j++)
2094 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2095 movables, regs) == 0)
2096 return 0;
2097 break;
2098
2099 case 'e':
2100 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2101 == 0)
2102 return 0;
2103 break;
2104
2105 case 's':
2106 if (strcmp (XSTR (x, i), XSTR (y, i)))
2107 return 0;
2108 break;
2109
2110 case 'u':
2111 /* These are just backpointers, so they don't matter. */
2112 break;
2113
2114 case '0':
2115 break;
2116
2117 /* It is believed that rtx's at this level will never
2118 contain anything but integers and other rtx's,
2119 except for within LABEL_REFs and SYMBOL_REFs. */
2120 default:
2121 gcc_unreachable ();
2122 }
2123 }
2124 return 1;
2125 }
2126 \f
2127 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2128 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2129 references is incremented once for each added note. */
2130
2131 static void
2132 add_label_notes (rtx x, rtx insns)
2133 {
2134 enum rtx_code code = GET_CODE (x);
2135 int i, j;
2136 const char *fmt;
2137 rtx insn;
2138
2139 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2140 {
2141 /* This code used to ignore labels that referred to dispatch tables to
2142 avoid flow generating (slightly) worse code.
2143
2144 We no longer ignore such label references (see LABEL_REF handling in
2145 mark_jump_label for additional information). */
2146 for (insn = insns; insn; insn = NEXT_INSN (insn))
2147 if (reg_mentioned_p (XEXP (x, 0), insn))
2148 {
2149 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2150 REG_NOTES (insn));
2151 if (LABEL_P (XEXP (x, 0)))
2152 LABEL_NUSES (XEXP (x, 0))++;
2153 }
2154 }
2155
2156 fmt = GET_RTX_FORMAT (code);
2157 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2158 {
2159 if (fmt[i] == 'e')
2160 add_label_notes (XEXP (x, i), insns);
2161 else if (fmt[i] == 'E')
2162 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2163 add_label_notes (XVECEXP (x, i, j), insns);
2164 }
2165 }
2166 \f
2167 /* Scan MOVABLES, and move the insns that deserve to be moved.
2168 If two matching movables are combined, replace one reg with the
2169 other throughout. */
2170
2171 static void
2172 move_movables (struct loop *loop, struct loop_movables *movables,
2173 int threshold, int insn_count)
2174 {
2175 struct loop_regs *regs = LOOP_REGS (loop);
2176 int nregs = regs->num;
2177 rtx new_start = 0;
2178 struct movable *m;
2179 rtx p;
2180 rtx loop_start = loop->start;
2181 rtx loop_end = loop->end;
2182 /* Map of pseudo-register replacements to handle combining
2183 when we move several insns that load the same value
2184 into different pseudo-registers. */
2185 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2186 char *already_moved = xcalloc (nregs, sizeof (char));
2187
2188 for (m = movables->head; m; m = m->next)
2189 {
2190 /* Describe this movable insn. */
2191
2192 if (loop_dump_stream)
2193 {
2194 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2195 INSN_UID (m->insn), m->regno, m->lifetime);
2196 if (m->consec > 0)
2197 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2198 if (m->cond)
2199 fprintf (loop_dump_stream, "cond ");
2200 if (m->force)
2201 fprintf (loop_dump_stream, "force ");
2202 if (m->global)
2203 fprintf (loop_dump_stream, "global ");
2204 if (m->done)
2205 fprintf (loop_dump_stream, "done ");
2206 if (m->move_insn)
2207 fprintf (loop_dump_stream, "move-insn ");
2208 if (m->match)
2209 fprintf (loop_dump_stream, "matches %d ",
2210 INSN_UID (m->match->insn));
2211 if (m->forces)
2212 fprintf (loop_dump_stream, "forces %d ",
2213 INSN_UID (m->forces->insn));
2214 }
2215
2216 /* Ignore the insn if it's already done (it matched something else).
2217 Otherwise, see if it is now safe to move. */
2218
2219 if (!m->done
2220 && (! m->cond
2221 || (1 == loop_invariant_p (loop, m->set_src)
2222 && (m->dependencies == 0
2223 || 1 == loop_invariant_p (loop, m->dependencies))
2224 && (m->consec == 0
2225 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2226 m->consec + 1,
2227 m->insn))))
2228 && (! m->forces || m->forces->done))
2229 {
2230 int regno;
2231 rtx p;
2232 int savings = m->savings;
2233
2234 /* We have an insn that is safe to move.
2235 Compute its desirability. */
2236
2237 p = m->insn;
2238 regno = m->regno;
2239
2240 if (loop_dump_stream)
2241 fprintf (loop_dump_stream, "savings %d ", savings);
2242
2243 if (regs->array[regno].moved_once && loop_dump_stream)
2244 fprintf (loop_dump_stream, "halved since already moved ");
2245
2246 /* An insn MUST be moved if we already moved something else
2247 which is safe only if this one is moved too: that is,
2248 if already_moved[REGNO] is nonzero. */
2249
2250 /* An insn is desirable to move if the new lifetime of the
2251 register is no more than THRESHOLD times the old lifetime.
2252 If it's not desirable, it means the loop is so big
2253 that moving won't speed things up much,
2254 and it is liable to make register usage worse. */
2255
2256 /* It is also desirable to move if it can be moved at no
2257 extra cost because something else was already moved. */
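/* Concretely: move when savings * lifetime >= insn_count / threshold;
   the bar is doubled for a reg already moved out of another loop.  */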
2258
2259 if (already_moved[regno]
2260 || (threshold * savings * m->lifetime) >=
2261 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2262 || (m->forces && m->forces->done
2263 && regs->array[m->forces->regno].n_times_set == 1))
2264 {
2265 int count;
2266 struct movable *m1;
2267 rtx first = NULL_RTX;
2268 rtx newreg = NULL_RTX;
2269
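/* For an insert_temp movable, compute the value into a fresh pseudo
   before the loop and copy it into the original destination inside
   the loop.  */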
2270 if (m->insert_temp)
2271 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2272
2273 /* Now move the insns that set the reg. */
2274
2275 if (m->partial && m->match)
2276 {
2277 rtx newpat, i1;
2278 rtx r1, r2;
2279 /* Find the end of this chain of matching regs.
2280 Thus, we load each reg in the chain from that one reg.
2281 And that reg is loaded with 0 directly,
2282 since it has ->match == 0. */
2283 for (m1 = m; m1->match; m1 = m1->match);
2284 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2285 SET_DEST (PATTERN (m1->insn)));
2286 i1 = loop_insn_hoist (loop, newpat);
2287
2288 /* Mark the moved, invariant reg as being allowed to
2289 share a hard reg with the other matching invariant. */
2290 REG_NOTES (i1) = REG_NOTES (m->insn);
2291 r1 = SET_DEST (PATTERN (m->insn));
2292 r2 = SET_DEST (PATTERN (m1->insn));
2293 regs_may_share
2294 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2295 gen_rtx_EXPR_LIST (VOIDmode, r2,
2296 regs_may_share));
2297 delete_insn (m->insn);
2298
2299 if (new_start == 0)
2300 new_start = i1;
2301
2302 if (loop_dump_stream)
2303 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2304 }
2305 /* If we are to re-generate the item being moved with a
2306 new move insn, first delete what we have and then emit
2307 the move insn before the loop. */
2308 else if (m->move_insn)
2309 {
2310 rtx i1, temp, seq;
2311
2312 for (count = m->consec; count >= 0; count--)
2313 {
2314 if (!NOTE_P (p))
2315 {
2316 /* If this is the first insn of a library
2317 call sequence, something is very
2318 wrong. */
2319 gcc_assert (!find_reg_note
2320 (p, REG_LIBCALL, NULL_RTX));
2321
2322 /* If this is the last insn of a libcall
2323 sequence, then delete every insn in the
2324 sequence except the last. The last insn
2325 is handled in the normal manner. */
2326 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2327
2328 if (temp)
2329 {
2330 temp = XEXP (temp, 0);
2331 while (temp != p)
2332 temp = delete_insn (temp);
2333 }
2334 }
2335
2336 temp = p;
2337 p = delete_insn (p);
2338
2339 /* simplify_giv_expr expects that it can walk the insns
2340 at m->insn forwards and see this old sequence we are
2341 tossing here. delete_insn does preserve the next
2342 pointers, but when we skip over a NOTE we must fix
2343 it up. Otherwise that code walks into the non-deleted
2344 insn stream. */
2345 while (p && NOTE_P (p))
2346 p = NEXT_INSN (temp) = NEXT_INSN (p);
2347
2348 if (m->insert_temp)
2349 {
2350 /* Replace the original insn with a move from
2351 our newly created temp. */
2352 start_sequence ();
2353 emit_move_insn (m->set_dest, newreg);
2354 seq = get_insns ();
2355 end_sequence ();
2356 emit_insn_before (seq, p);
2357 }
2358 }
2359
2360 start_sequence ();
2361 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2362 m->set_src);
2363 seq = get_insns ();
2364 end_sequence ();
2365
2366 add_label_notes (m->set_src, seq);
2367
2368 i1 = loop_insn_hoist (loop, seq);
2369 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2370 set_unique_reg_note (i1,
2371 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2372 m->set_src);
2373
2374 if (loop_dump_stream)
2375 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2376
2377 /* The more regs we move, the less we like moving them. */
2378 threshold -= 3;
2379 }
2380 else
2381 {
2382 for (count = m->consec; count >= 0; count--)
2383 {
2384 rtx i1, temp;
2385
2386 /* If first insn of libcall sequence, skip to end. */
2387 /* Do this at start of loop, since p is guaranteed to
2388 be an insn here. */
2389 if (!NOTE_P (p)
2390 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2391 p = XEXP (temp, 0);
2392
2393 /* If last insn of libcall sequence, move all
2394 insns except the last before the loop. The last
2395 insn is handled in the normal manner. */
2396 if (!NOTE_P (p)
2397 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2398 {
2399 rtx fn_address = 0;
2400 rtx fn_reg = 0;
2401 rtx fn_address_insn = 0;
2402
2403 first = 0;
2404 for (temp = XEXP (temp, 0); temp != p;
2405 temp = NEXT_INSN (temp))
2406 {
2407 rtx body;
2408 rtx n;
2409 rtx next;
2410
2411 if (NOTE_P (temp))
2412 continue;
2413
2414 body = PATTERN (temp);
2415
2416 /* Find the next insn after TEMP,
2417 not counting USE or NOTE insns. */
2418 for (next = NEXT_INSN (temp); next != p;
2419 next = NEXT_INSN (next))
2420 if (! (NONJUMP_INSN_P (next)
2421 && GET_CODE (PATTERN (next)) == USE)
2422 && !NOTE_P (next))
2423 break;
2424
2425 /* If that is the call, this may be the insn
2426 that loads the function address.
2427
2428 Extract the function address from the insn
2429 that loads it into a register.
2430 If this insn was cse'd, we get incorrect code.
2431
2432 So emit a new move insn that copies the
2433 function address into the register that the
2434 call insn will use. flow.c will delete any
2435 redundant stores that we have created. */
2436 if (CALL_P (next)
2437 && GET_CODE (body) == SET
2438 && REG_P (SET_DEST (body))
2439 && (n = find_reg_note (temp, REG_EQUAL,
2440 NULL_RTX)))
2441 {
2442 fn_reg = SET_SRC (body);
2443 if (!REG_P (fn_reg))
2444 fn_reg = SET_DEST (body);
2445 fn_address = XEXP (n, 0);
2446 fn_address_insn = temp;
2447 }
2448 /* We have the call insn.
2449 If it uses the register we suspect it might,
2450 load it with the correct address directly. */
2451 if (CALL_P (temp)
2452 && fn_address != 0
2453 && reg_referenced_p (fn_reg, body))
2454 loop_insn_emit_after (loop, 0, fn_address_insn,
2455 gen_move_insn
2456 (fn_reg, fn_address));
2457
2458 if (CALL_P (temp))
2459 {
2460 i1 = loop_call_insn_hoist (loop, body);
2461 /* Because the USAGE information potentially
2462 contains objects other than hard registers
2463 we need to copy it. */
2464 if (CALL_INSN_FUNCTION_USAGE (temp))
2465 CALL_INSN_FUNCTION_USAGE (i1)
2466 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2467 }
2468 else
2469 i1 = loop_insn_hoist (loop, body);
2470 if (first == 0)
2471 first = i1;
2472 if (temp == fn_address_insn)
2473 fn_address_insn = i1;
2474 REG_NOTES (i1) = REG_NOTES (temp);
2475 REG_NOTES (temp) = NULL;
2476 delete_insn (temp);
2477 }
2478 if (new_start == 0)
2479 new_start = first;
2480 }
2481 if (m->savemode != VOIDmode)
2482 {
2483 /* P sets REG to zero; but we should clear only
2484 the bits that are not covered by the mode
2485 m->savemode. */
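/* E.g. with m->savemode == QImode this emits REG &= 0xff before the
   loop, clearing everything but the low byte.  */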
2486 rtx reg = m->set_dest;
2487 rtx sequence;
2488 rtx tem;
2489
2490 start_sequence ();
2491 tem = expand_simple_binop
2492 (GET_MODE (reg), AND, reg,
2493 GEN_INT ((((HOST_WIDE_INT) 1
2494 << GET_MODE_BITSIZE (m->savemode)))
2495 - 1),
2496 reg, 1, OPTAB_LIB_WIDEN);
2497 gcc_assert (tem);
2498 if (tem != reg)
2499 emit_move_insn (reg, tem);
2500 sequence = get_insns ();
2501 end_sequence ();
2502 i1 = loop_insn_hoist (loop, sequence);
2503 }
2504 else if (CALL_P (p))
2505 {
2506 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2507 /* Because the USAGE information potentially
2508 contains objects other than hard registers
2509 we need to copy it. */
2510 if (CALL_INSN_FUNCTION_USAGE (p))
2511 CALL_INSN_FUNCTION_USAGE (i1)
2512 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2513 }
2514 else if (count == m->consec && m->move_insn_first)
2515 {
2516 rtx seq;
2517 /* The SET_SRC might not be invariant, so we must
2518 use the REG_EQUAL note. */
2519 start_sequence ();
2520 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2521 m->set_src);
2522 seq = get_insns ();
2523 end_sequence ();
2524
2525 add_label_notes (m->set_src, seq);
2526
2527 i1 = loop_insn_hoist (loop, seq);
2528 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2529 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2530 : REG_EQUAL, m->set_src);
2531 }
2532 else if (m->insert_temp)
2533 {
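/* Hoist a copy of the insn before the loop, retargeted at the new
   temporary; the original insn is deleted below and replaced inside
   the loop by a move from that temporary.  */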
2534 rtx *reg_map2 = xcalloc (REGNO (newreg),
2535 sizeof (rtx));
2536 reg_map2[m->regno] = newreg;
2537
2538 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2539 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2540 free (reg_map2);
2541 }
2542 else
2543 i1 = loop_insn_hoist (loop, PATTERN (p));
2544
2545 if (REG_NOTES (i1) == 0)
2546 {
2547 REG_NOTES (i1) = REG_NOTES (p);
2548 REG_NOTES (p) = NULL;
2549
2550 /* If there is a REG_EQUAL note present whose value
2551 is not loop invariant, then delete it, since it
2552 may cause problems with later optimization passes.
2553 It is possible for cse to create notes
2554 like this as a result of record_jump_cond. */
2555
2556 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2557 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2558 remove_note (i1, temp);
2559 }
2560
2561 if (new_start == 0)
2562 new_start = i1;
2563
2564 if (loop_dump_stream)
2565 fprintf (loop_dump_stream, " moved to %d",
2566 INSN_UID (i1));
2567
2568 /* If library call, now fix the REG_NOTES that contain
2569 insn pointers, namely REG_LIBCALL on FIRST
2570 and REG_RETVAL on I1. */
2571 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2572 {
2573 XEXP (temp, 0) = first;
2574 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2575 XEXP (temp, 0) = i1;
2576 }
2577
2578 temp = p;
2579 delete_insn (p);
2580 p = NEXT_INSN (p);
2581
2582 /* simplify_giv_expr expects that it can walk the insns
2583 at m->insn forwards and see this old sequence we are
2584 tossing here. delete_insn does preserve the next
2585 pointers, but when we skip over a NOTE we must fix
2586 it up. Otherwise that code walks into the non-deleted
2587 insn stream. */
2588 while (p && NOTE_P (p))
2589 p = NEXT_INSN (temp) = NEXT_INSN (p);
2590
2591 if (m->insert_temp)
2592 {
2593 rtx seq;
2594 /* Replace the original insn with a move from
2595 our newly created temp. */
2596 start_sequence ();
2597 emit_move_insn (m->set_dest, newreg);
2598 seq = get_insns ();
2599 end_sequence ();
2600 emit_insn_before (seq, p);
2601 }
2602 }
2603
2604 /* The more regs we move, the less we like moving them. */
2605 threshold -= 3;
2606 }
2607
2608 m->done = 1;
2609
2610 if (!m->insert_temp)
2611 {
2612 /* Any other movable that loads the same register
2613 MUST be moved. */
2614 already_moved[regno] = 1;
2615
2616 /* This reg has been moved out of one loop. */
2617 regs->array[regno].moved_once = 1;
2618
2619 /* The reg set here is now invariant. */
2620 if (! m->partial)
2621 {
2622 int i;
2623 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2624 regs->array[regno+i].set_in_loop = 0;
2625 }
2626
2627 /* Change the length-of-life info for the register
2628 to say it lives at least the full length of this loop.
2629 This will help guide optimizations in outer loops. */
2630
2631 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2632 /* This is the old insn before all the moved insns.
2633 We can't use the moved insn because it is out of range
2634 in uid_luid. Only the old insns have luids. */
2635 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2636 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2637 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2638 }
2639
2640 /* Combine with this moved insn any other matching movables. */
2641
2642 if (! m->partial)
2643 for (m1 = movables->head; m1; m1 = m1->next)
2644 if (m1->match == m)
2645 {
2646 rtx temp;
2647
2648 reg_map[m1->regno] = m->set_dest;
2649
2650 /* Get rid of the matching insn
2651 and prevent further processing of it. */
2652 m1->done = 1;
2653
2654 /* If library call, delete all insns. */
2655 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2656 NULL_RTX)))
2657 delete_insn_chain (XEXP (temp, 0), m1->insn);
2658 else
2659 delete_insn (m1->insn);
2660
2661 /* Any other movable that loads the same register
2662 MUST be moved. */
2663 already_moved[m1->regno] = 1;
2664
2665 /* The reg merged here is now invariant,
2666 if the reg it matches is invariant. */
2667 if (! m->partial)
2668 {
2669 int i;
2670 for (i = 0;
2671 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2672 i++)
2673 regs->array[m1->regno+i].set_in_loop = 0;
2674 }
2675 }
2676 }
2677 else if (loop_dump_stream)
2678 fprintf (loop_dump_stream, "not desirable");
2679 }
2680 else if (loop_dump_stream && !m->match)
2681 fprintf (loop_dump_stream, "not safe");
2682
2683 if (loop_dump_stream)
2684 fprintf (loop_dump_stream, "\n");
2685 }
2686
2687 if (new_start == 0)
2688 new_start = loop_start;
2689
2690 /* Go through all the instructions in the loop, making
2691 all the register substitutions scheduled in REG_MAP. */
2692 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2693 if (INSN_P (p))
2694 {
2695 replace_regs (PATTERN (p), reg_map, nregs, 0);
2696 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2697 INSN_CODE (p) = -1;
2698 }
2699
2700 /* Clean up. */
2701 free (reg_map);
2702 free (already_moved);
2703 }
2704
2705
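/* Append the movable M to the end of the chain MOVABLES. */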
2706 static void
2707 loop_movables_add (struct loop_movables *movables, struct movable *m)
2708 {
2709 if (movables->head == 0)
2710 movables->head = m;
2711 else
2712 movables->last->next = m;
2713 movables->last = m;
2714 }
2715
2716
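/* Free the memory used by all movables in the chain MOVABLES. */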
2717 static void
2718 loop_movables_free (struct loop_movables *movables)
2719 {
2720 struct movable *m;
2721 struct movable *m_next;
2722
2723 for (m = movables->head; m; m = m_next)
2724 {
2725 m_next = m->next;
2726 free (m);
2727 }
2728 }
2729 \f
2730 #if 0
2731 /* Scan X and replace the address of any MEM in it with ADDR.
2732 REG is the address that MEM should have before the replacement. */
2733
2734 static void
2735 replace_call_address (rtx x, rtx reg, rtx addr)
2736 {
2737 enum rtx_code code;
2738 int i;
2739 const char *fmt;
2740
2741 if (x == 0)
2742 return;
2743 code = GET_CODE (x);
2744 switch (code)
2745 {
2746 case PC:
2747 case CC0:
2748 case CONST_INT:
2749 case CONST_DOUBLE:
2750 case CONST:
2751 case SYMBOL_REF:
2752 case LABEL_REF:
2753 case REG:
2754 return;
2755
2756 case SET:
2757 /* Short cut for very common case. */
2758 replace_call_address (XEXP (x, 1), reg, addr);
2759 return;
2760
2761 case CALL:
2762 /* Short cut for very common case. */
2763 replace_call_address (XEXP (x, 0), reg, addr);
2764 return;
2765
2766 case MEM:
2767 /* If this MEM uses a reg other than the one we expected,
2768 something is wrong. */
2769 gcc_assert (XEXP (x, 0) == reg);
2770 XEXP (x, 0) = addr;
2771 return;
2772
2773 default:
2774 break;
2775 }
2776
2777 fmt = GET_RTX_FORMAT (code);
2778 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2779 {
2780 if (fmt[i] == 'e')
2781 replace_call_address (XEXP (x, i), reg, addr);
2782 else if (fmt[i] == 'E')
2783 {
2784 int j;
2785 for (j = 0; j < XVECLEN (x, i); j++)
2786 replace_call_address (XVECEXP (x, i, j), reg, addr);
2787 }
2788 }
2789 }
2790 #endif
2791 \f
2792 /* Return the number of memory refs to addresses that vary
2793 in the rtx X. */
2794
2795 static int
2796 count_nonfixed_reads (const struct loop *loop, rtx x)
2797 {
2798 enum rtx_code code;
2799 int i;
2800 const char *fmt;
2801 int value;
2802
2803 if (x == 0)
2804 return 0;
2805
2806 code = GET_CODE (x);
2807 switch (code)
2808 {
2809 case PC:
2810 case CC0:
2811 case CONST_INT:
2812 case CONST_DOUBLE:
2813 case CONST:
2814 case SYMBOL_REF:
2815 case LABEL_REF:
2816 case REG:
2817 return 0;
2818
2819 case MEM:
2820 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2821 + count_nonfixed_reads (loop, XEXP (x, 0)));
2822
2823 default:
2824 break;
2825 }
2826
2827 value = 0;
2828 fmt = GET_RTX_FORMAT (code);
2829 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2830 {
2831 if (fmt[i] == 'e')
2832 value += count_nonfixed_reads (loop, XEXP (x, i));
2833 if (fmt[i] == 'E')
2834 {
2835 int j;
2836 for (j = 0; j < XVECLEN (x, i); j++)
2837 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2838 }
2839 }
2840 return value;
2841 }
2842 \f
2843 /* Scan a loop setting the elements `loops_enclosed',
2844 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2845 `unknown_address_altered', `unknown_constant_address_altered', and
2846 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2847 list `store_mems' in LOOP. */
2848
2849 static void
2850 prescan_loop (struct loop *loop)
2851 {
2852 int level = 1;
2853 rtx insn;
2854 struct loop_info *loop_info = LOOP_INFO (loop);
2855 rtx start = loop->start;
2856 rtx end = loop->end;
2857 /* The label after END. Jumping here is just like falling off the
2858 end of the loop. We use next_nonnote_insn instead of next_label
2859 as a hedge against the (pathological) case where some actual insn
2860 might end up between the two. */
2861 rtx exit_target = next_nonnote_insn (end);
2862
2863 loop_info->has_indirect_jump = indirect_jump_in_function;
2864 loop_info->pre_header_has_call = 0;
2865 loop_info->has_call = 0;
2866 loop_info->has_nonconst_call = 0;
2867 loop_info->has_prefetch = 0;
2868 loop_info->has_volatile = 0;
2869 loop_info->has_tablejump = 0;
2870 loop_info->has_multiple_exit_targets = 0;
2871 loop->level = 1;
2872
2873 loop_info->unknown_address_altered = 0;
2874 loop_info->unknown_constant_address_altered = 0;
2875 loop_info->store_mems = NULL_RTX;
2876 loop_info->first_loop_store_insn = NULL_RTX;
2877 loop_info->mems_idx = 0;
2878 loop_info->num_mem_sets = 0;
2879
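/* Scan backwards from the loop start to the nearest label to see whether
   the loop's pre-header contains a call. */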
2880 for (insn = start; insn && !LABEL_P (insn);
2881 insn = PREV_INSN (insn))
2882 {
2883 if (CALL_P (insn))
2884 {
2885 loop_info->pre_header_has_call = 1;
2886 break;
2887 }
2888 }
2889
2890 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2891 insn = NEXT_INSN (insn))
2892 {
2893 switch (GET_CODE (insn))
2894 {
2895 case NOTE:
2896 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2897 {
2898 ++level;
2899 /* Count number of loops contained in this one. */
2900 loop->level++;
2901 }
2902 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2903 --level;
2904 break;
2905
2906 case CALL_INSN:
2907 if (! CONST_OR_PURE_CALL_P (insn))
2908 {
2909 loop_info->unknown_address_altered = 1;
2910 loop_info->has_nonconst_call = 1;
2911 }
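/* A pure call may still read memory, so it also counts as a
   non-constant call. */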
2912 else if (pure_call_p (insn))
2913 loop_info->has_nonconst_call = 1;
2914 loop_info->has_call = 1;
2915 if (can_throw_internal (insn))
2916 loop_info->has_multiple_exit_targets = 1;
2917 break;
2918
2919 case JUMP_INSN:
2920 if (! loop_info->has_multiple_exit_targets)
2921 {
2922 rtx set = pc_set (insn);
2923
2924 if (set)
2925 {
2926 rtx src = SET_SRC (set);
2927 rtx label1, label2;
2928
2929 if (GET_CODE (src) == IF_THEN_ELSE)
2930 {
2931 label1 = XEXP (src, 1);
2932 label2 = XEXP (src, 2);
2933 }
2934 else
2935 {
2936 label1 = src;
2937 label2 = NULL_RTX;
2938 }
2939
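/* Examine each potential target: anything that is not a LABEL_REF, or a
   label outside the loop other than the normal exit, counts as an
   additional exit target. */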
2940 do
2941 {
2942 if (label1 && label1 != pc_rtx)
2943 {
2944 if (GET_CODE (label1) != LABEL_REF)
2945 {
2946 /* Something tricky. */
2947 loop_info->has_multiple_exit_targets = 1;
2948 break;
2949 }
2950 else if (XEXP (label1, 0) != exit_target
2951 && LABEL_OUTSIDE_LOOP_P (label1))
2952 {
2953 /* A jump outside the current loop. */
2954 loop_info->has_multiple_exit_targets = 1;
2955 break;
2956 }
2957 }
2958
2959 label1 = label2;
2960 label2 = NULL_RTX;
2961 }
2962 while (label1);
2963 }
2964 else
2965 {
2966 /* A return, or something tricky. */
2967 loop_info->has_multiple_exit_targets = 1;
2968 }
2969 }
2970 /* Fall through. */
2971
2972 case INSN:
2973 if (volatile_refs_p (PATTERN (insn)))
2974 loop_info->has_volatile = 1;
2975
2976 if (JUMP_P (insn)
2977 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2978 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2979 loop_info->has_tablejump = 1;
2980
2981 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2982 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2983 loop_info->first_loop_store_insn = insn;
2984
2985 if (flag_non_call_exceptions && can_throw_internal (insn))
2986 loop_info->has_multiple_exit_targets = 1;
2987 break;
2988
2989 default:
2990 break;
2991 }
2992 }
2993
2994 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2995 if (/* An exception thrown by a called function might land us
2996 anywhere. */
2997 ! loop_info->has_nonconst_call
2998 /* We don't want loads for MEMs moved to a location before the
2999 one at which their stack memory becomes allocated. (Note
3000 that this is not a problem for malloc, etc., since those
3001 require actual function calls.) */
3002 && ! current_function_calls_alloca
3003 /* There are ways to leave the loop other than falling off the
3004 end. */
3005 && ! loop_info->has_multiple_exit_targets)
3006 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3007 insn = NEXT_INSN (insn))
3008 for_each_rtx (&insn, insert_loop_mem, loop_info);
3009
3010 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3011 that loop_invariant_p and load_mems can use true_dependence
3012 to determine what is really clobbered. */
3013 if (loop_info->unknown_address_altered)
3014 {
3015 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3016
3017 loop_info->store_mems
3018 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3019 }
3020 if (loop_info->unknown_constant_address_altered)
3021 {
3022 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3023 MEM_READONLY_P (mem) = 1;
3024 loop_info->store_mems
3025 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3026 }
3027 }
3028 \f
3029 /* Invalidate all loops containing LABEL. */
3030
3031 static void
3032 invalidate_loops_containing_label (rtx label)
3033 {
3034 struct loop *loop;
3035 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3036 loop->invalid = 1;
3037 }
3038
3039 /* Scan the function looking for loops. Record the start and end of each loop.
3040 Also mark as invalid any loops that contain a setjmp or are branched
3041 to from outside the loop. */
3042
3043 static void
3044 find_and_verify_loops (rtx f, struct loops *loops)
3045 {
3046 rtx insn;
3047 rtx label;
3048 int num_loops;
3049 struct loop *current_loop;
3050 struct loop *next_loop;
3051 struct loop *loop;
3052
3053 num_loops = loops->num;
3054
3055 compute_luids (f, NULL_RTX, 0);
3056
3057 /* If there are jumps to undefined labels,
3058 treat them as jumps out of any/all loops.
3059 This also avoids writing past end of tables when there are no loops. */
3060 uid_loop[0] = NULL;
3061
3062 /* Find boundaries of loops, mark which loops are contained within
3063 loops, and invalidate loops that have setjmp. */
3064
3065 num_loops = 0;
3066 current_loop = NULL;
3067 for (insn = f; insn; insn = NEXT_INSN (insn))
3068 {
3069 if (NOTE_P (insn))
3070 switch (NOTE_LINE_NUMBER (insn))
3071 {
3072 case NOTE_INSN_LOOP_BEG:
3073 next_loop = loops->array + num_loops;
3074 next_loop->num = num_loops;
3075 num_loops++;
3076 next_loop->start = insn;
3077 next_loop->outer = current_loop;
3078 current_loop = next_loop;
3079 break;
3080
3081 case NOTE_INSN_LOOP_END:
3082 gcc_assert (current_loop);
3083
3084 current_loop->end = insn;
3085 current_loop = current_loop->outer;
3086 break;
3087
3088 default:
3089 break;
3090 }
3091
3092 if (CALL_P (insn)
3093 && find_reg_note (insn, REG_SETJMP, NULL))
3094 {
3095 /* In this case, we must invalidate our current loop and any
3096 enclosing loop. */
3097 for (loop = current_loop; loop; loop = loop->outer)
3098 {
3099 loop->invalid = 1;
3100 if (loop_dump_stream)
3101 fprintf (loop_dump_stream,
3102 "\nLoop at %d ignored due to setjmp.\n",
3103 INSN_UID (loop->start));
3104 }
3105 }
3106
3107 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3108 enclosing loop, but this doesn't matter. */
3109 uid_loop[INSN_UID (insn)] = current_loop;
3110 }
3111
3112 /* Any loop containing a label used in an initializer must be invalidated,
3113 because it can be jumped into from anywhere. */
3114 for (label = forced_labels; label; label = XEXP (label, 1))
3115 invalidate_loops_containing_label (XEXP (label, 0));
3116
3117 /* Any loop containing a label used for an exception handler must be
3118 invalidated, because it can be jumped into from anywhere. */
3119 for_each_eh_label (invalidate_loops_containing_label);
3120
3121 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3122 loop that it is not contained within, that loop is marked invalid.
3123 If any INSN or CALL_INSN uses a label's address, then the loop containing
3124 that label is marked invalid, because it could be jumped into from
3125 anywhere.
3126
3127 Also look for blocks of code ending in an unconditional branch that
3128 exits the loop. If such a block is surrounded by a conditional
3129 branch around the block, move the block elsewhere (see below) and
3130 invert the jump to point to the code block. This may eliminate a
3131 label in our loop and will simplify processing by both us and a
3132 possible second cse pass. */
3133
3134 for (insn = f; insn; insn = NEXT_INSN (insn))
3135 if (INSN_P (insn))
3136 {
3137 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3138
3139 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3140 {
3141 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3142 if (note)
3143 invalidate_loops_containing_label (XEXP (note, 0));
3144 }
3145
3146 if (!JUMP_P (insn))
3147 continue;
3148
3149 mark_loop_jump (PATTERN (insn), this_loop);
3150
3151 /* See if this is an unconditional branch outside the loop. */
3152 if (this_loop
3153 && (GET_CODE (PATTERN (insn)) == RETURN
3154 || (any_uncondjump_p (insn)
3155 && onlyjump_p (insn)
3156 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3157 != this_loop)))
3158 && get_max_uid () < max_uid_for_loop)
3159 {
3160 rtx p;
3161 rtx our_next = next_real_insn (insn);
3162 rtx last_insn_to_move = NEXT_INSN (insn);
3163 struct loop *dest_loop;
3164 struct loop *outer_loop = NULL;
3165
3166 /* Go backwards until we reach the start of the loop, a label,
3167 or a JUMP_INSN. */
3168 for (p = PREV_INSN (insn);
3169 !LABEL_P (p)
3170 && ! (NOTE_P (p)
3171 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3172 && !JUMP_P (p);
3173 p = PREV_INSN (p))
3174 ;
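/* P is now the nearest label, NOTE_INSN_LOOP_BEG, or jump that
   precedes INSN. */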
3175
3176 /* Check for the case where we have a jump to an inner nested
3177 loop, and do not perform the optimization in that case. */
3178
3179 if (JUMP_LABEL (insn))
3180 {
3181 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3182 if (dest_loop)
3183 {
3184 for (outer_loop = dest_loop; outer_loop;
3185 outer_loop = outer_loop->outer)
3186 if (outer_loop == this_loop)
3187 break;
3188 }
3189 }
3190
3191 /* Make sure that the target of P is within the current loop. */
3192
3193 if (JUMP_P (p) && JUMP_LABEL (p)
3194 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3195 outer_loop = this_loop;
3196
3197 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3198 we have a block of code to try to move.
3199
3200 We look backward and then forward from the target of INSN
3201 to find a BARRIER at the same loop depth as the target.
3202 If we find such a BARRIER, we make a new label for the start
3203 of the block, invert the jump in P and point it to that label,
3204 and move the block of code to the spot we found. */
3205
3206 if (! outer_loop
3207 && JUMP_P (p)
3208 && JUMP_LABEL (p) != 0
3209 /* Just ignore jumps to labels that were never emitted.
3210 These always indicate compilation errors. */
3211 && INSN_UID (JUMP_LABEL (p)) != 0
3212 && any_condjump_p (p) && onlyjump_p (p)
3213 && next_real_insn (JUMP_LABEL (p)) == our_next
3214 /* If it's not safe to move the sequence, then we
3215 mustn't try. */
3216 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3217 &last_insn_to_move))
3218 {
3219 rtx target
3220 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3221 struct loop *target_loop = uid_loop[INSN_UID (target)];
3222 rtx loc, loc2;
3223 rtx tmp;
3224
3225 /* Search for possible garbage past the conditional jumps
3226 and look for the last barrier. */
3227 for (tmp = last_insn_to_move;
3228 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3229 if (BARRIER_P (tmp))
3230 last_insn_to_move = tmp;
3231
3232 for (loc = target; loc; loc = PREV_INSN (loc))
3233 if (BARRIER_P (loc)
3234 /* Don't move things inside a tablejump. */
3235 && ((loc2 = next_nonnote_insn (loc)) == 0
3236 || !LABEL_P (loc2)
3237 || (loc2 = next_nonnote_insn (loc2)) == 0
3238 || !JUMP_P (loc2)
3239 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3240 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3241 && uid_loop[INSN_UID (loc)] == target_loop)
3242 break;
3243
3244 if (loc == 0)
3245 for (loc = target; loc; loc = NEXT_INSN (loc))
3246 if (BARRIER_P (loc)
3247 /* Don't move things inside a tablejump. */
3248 && ((loc2 = next_nonnote_insn (loc)) == 0
3249 || !LABEL_P (loc2)
3250 || (loc2 = next_nonnote_insn (loc2)) == 0
3251 || !JUMP_P (loc2)
3252 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3253 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3254 && uid_loop[INSN_UID (loc)] == target_loop)
3255 break;
3256
3257 if (loc)
3258 {
3259 rtx cond_label = JUMP_LABEL (p);
3260 rtx new_label = get_label_after (p);
3261
3262 /* Ensure our label doesn't go away. */
3263 LABEL_NUSES (cond_label)++;
3264
3265 /* Verify that uid_loop is large enough and that
3266 we can invert P. */
3267 if (invert_jump (p, new_label, 1))
3268 {
3269 rtx q, r;
3270 bool only_notes;
3271
3272 /* If no suitable BARRIER was found, create a suitable
3273 one before TARGET. Since TARGET is a fall through
3274 path, we'll need to insert a jump around our block
3275 and add a BARRIER before TARGET.
3276
3277 This creates an extra unconditional jump outside
3278 the loop. However, the benefits of removing rarely
3279 executed instructions from inside the loop usually
3280 outweigh the cost of the extra unconditional jump
3281 outside the loop. */
3282 if (loc == 0)
3283 {
3284 rtx temp;
3285
3286 temp = gen_jump (JUMP_LABEL (insn));
3287 temp = emit_jump_insn_before (temp, target);
3288 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3289 LABEL_NUSES (JUMP_LABEL (insn))++;
3290 loc = emit_barrier_before (target);
3291 }
3292
3293 /* Include the BARRIER after INSN and copy the
3294 block after LOC. */
3295 only_notes = squeeze_notes (&new_label,
3296 &last_insn_to_move);
3297 gcc_assert (!only_notes);
3298
3299 reorder_insns (new_label, last_insn_to_move, loc);
3300
3301 /* All those insns are now in TARGET_LOOP. */
3302 for (q = new_label;
3303 q != NEXT_INSN (last_insn_to_move);
3304 q = NEXT_INSN (q))
3305 uid_loop[INSN_UID (q)] = target_loop;
3306
3307 /* The label jumped to by INSN is no longer a loop
3308 exit. Unless INSN does not have a label (e.g.,
3309 it is a RETURN insn), search loop->exit_labels
3310 to find its label_ref, and remove it. Also turn
3311 off the LABEL_OUTSIDE_LOOP_P bit. */
3312 if (JUMP_LABEL (insn))
3313 {
3314 for (q = 0, r = this_loop->exit_labels;
3315 r;
3316 q = r, r = LABEL_NEXTREF (r))
3317 if (XEXP (r, 0) == JUMP_LABEL (insn))
3318 {
3319 LABEL_OUTSIDE_LOOP_P (r) = 0;
3320 if (q)
3321 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3322 else
3323 this_loop->exit_labels = LABEL_NEXTREF (r);
3324 break;
3325 }
3326
3327 for (loop = this_loop; loop && loop != target_loop;
3328 loop = loop->outer)
3329 loop->exit_count--;
3330
3331 /* If we didn't find it, then something is
3332 wrong. */
3333 gcc_assert (r);
3334 }
3335
3336 /* P is now a jump outside the loop, so it must be put
3337 in loop->exit_labels, and marked as such.
3338 The easiest way to do this is to just call
3339 mark_loop_jump again for P. */
3340 mark_loop_jump (PATTERN (p), this_loop);
3341
3342 /* If INSN now jumps to the insn after it,
3343 delete INSN. */
3344 if (JUMP_LABEL (insn) != 0
3345 && (next_real_insn (JUMP_LABEL (insn))
3346 == next_real_insn (insn)))
3347 delete_related_insns (insn);
3348 }
3349
3350 /* Continue the loop after where the conditional
3351 branch used to jump, since the only branch insn
3352 in the block (if it still remains) is an inter-loop
3353 branch and hence needs no processing. */
3354 insn = NEXT_INSN (cond_label);
3355
3356 if (--LABEL_NUSES (cond_label) == 0)
3357 delete_related_insns (cond_label);
3358
3359 /* This loop will be continued with NEXT_INSN (insn). */
3360 insn = PREV_INSN (insn);
3361 }
3362 }
3363 }
3364 }
3365 }
3366
3367 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3368 loops it is contained in, mark the target loop invalid.
3369
3370 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3371
3372 static void
3373 mark_loop_jump (rtx x, struct loop *loop)
3374 {
3375 struct loop *dest_loop;
3376 struct loop *outer_loop;
3377 int i;
3378
3379 switch (GET_CODE (x))
3380 {
3381 case PC:
3382 case USE:
3383 case CLOBBER:
3384 case REG:
3385 case MEM:
3386 case CONST_INT:
3387 case CONST_DOUBLE:
3388 case RETURN:
3389 return;
3390
3391 case CONST:
3392 /* There could be a label reference in here. */
3393 mark_loop_jump (XEXP (x, 0), loop);
3394 return;
3395
3396 case PLUS:
3397 case MINUS:
3398 case MULT:
3399 mark_loop_jump (XEXP (x, 0), loop);
3400 mark_loop_jump (XEXP (x, 1), loop);
3401 return;
3402
3403 case LO_SUM:
3404 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3405 mark_loop_jump (XEXP (x, 1), loop);
3406 return;
3407
3408 case SIGN_EXTEND:
3409 case ZERO_EXTEND:
3410 mark_loop_jump (XEXP (x, 0), loop);
3411 return;
3412
3413 case LABEL_REF:
3414 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3415
3416 /* Link together all labels that branch outside the loop. This
3417 is used by final_[bg]iv_value and the loop unrolling code. Also
3418 mark this LABEL_REF so we know that this branch should predict
3419 false. */
3420
3421 /* A check to make sure the label is not in an inner nested loop,
3422 since this does not count as a loop exit. */
3423 if (dest_loop)
3424 {
3425 for (outer_loop = dest_loop; outer_loop;
3426 outer_loop = outer_loop->outer)
3427 if (outer_loop == loop)
3428 break;
3429 }
3430 else
3431 outer_loop = NULL;
3432
3433 if (loop && ! outer_loop)
3434 {
3435 LABEL_OUTSIDE_LOOP_P (x) = 1;
3436 LABEL_NEXTREF (x) = loop->exit_labels;
3437 loop->exit_labels = x;
3438
3439 for (outer_loop = loop;
3440 outer_loop && outer_loop != dest_loop;
3441 outer_loop = outer_loop->outer)
3442 outer_loop->exit_count++;
3443 }
3444
3445 /* If this is inside a loop, but not in the current loop or one enclosed
3446 by it, it invalidates at least one loop. */
3447
3448 if (! dest_loop)
3449 return;
3450
3451 /* We must invalidate every nested loop containing the target of this
3452 label, except those that also contain the jump insn. */
3453
3454 for (; dest_loop; dest_loop = dest_loop->outer)
3455 {
3456 /* Stop when we reach a loop that also contains the jump insn. */
3457 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3458 if (dest_loop == outer_loop)
3459 return;
3460
3461 /* If we get here, we know we need to invalidate a loop. */
3462 if (loop_dump_stream && ! dest_loop->invalid)
3463 fprintf (loop_dump_stream,
3464 "\nLoop at %d ignored due to multiple entry points.\n",
3465 INSN_UID (dest_loop->start));
3466
3467 dest_loop->invalid = 1;
3468 }
3469 return;
3470
3471 case SET:
3472 /* If this is not setting pc, ignore. */
3473 if (SET_DEST (x) == pc_rtx)
3474 mark_loop_jump (SET_SRC (x), loop);
3475 return;
3476
3477 case IF_THEN_ELSE:
3478 mark_loop_jump (XEXP (x, 1), loop);
3479 mark_loop_jump (XEXP (x, 2), loop);
3480 return;
3481
3482 case PARALLEL:
3483 case ADDR_VEC:
3484 for (i = 0; i < XVECLEN (x, 0); i++)
3485 mark_loop_jump (XVECEXP (x, 0, i), loop);
3486 return;
3487
3488 case ADDR_DIFF_VEC:
3489 for (i = 0; i < XVECLEN (x, 1); i++)
3490 mark_loop_jump (XVECEXP (x, 1, i), loop);
3491 return;
3492
3493 default:
3494 /* Strictly speaking this is not a jump into the loop, only a possible
3495 jump out of the loop. However, we have no way to link the destination
3496 of this jump onto the list of exit labels. To be safe we mark this
3497 loop and any containing loops as invalid. */
3498 if (loop)
3499 {
3500 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3501 {
3502 if (loop_dump_stream && ! outer_loop->invalid)
3503 fprintf (loop_dump_stream,
3504 "\nLoop at %d ignored due to unknown exit jump.\n",
3505 INSN_UID (outer_loop->start));
3506 outer_loop->invalid = 1;
3507 }
3508 }
3509 return;
3510 }
3511 }
3512 \f
3513 /* Return nonzero if there is a label in the range from
3514 insn INSN to and including the insn whose luid is END.
3515 INSN must have an assigned luid (i.e., it must not have
3516 been previously created by loop.c). */
3517
3518 static int
3519 labels_in_range_p (rtx insn, int end)
3520 {
3521 while (insn && INSN_LUID (insn) <= end)
3522 {
3523 if (LABEL_P (insn))
3524 return 1;
3525 insn = NEXT_INSN (insn);
3526 }
3527
3528 return 0;
3529 }
3530
3531 /* Record that a memory reference X is being set. */
3532
3533 static void
3534 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3535 void *data ATTRIBUTE_UNUSED)
3536 {
3537 struct loop_info *loop_info = data;
3538
3539 if (x == 0 || !MEM_P (x))
3540 return;
3541
3542 /* Count number of memory writes.
3543 This affects heuristics in strength_reduce. */
3544 loop_info->num_mem_sets++;
3545
3546 /* BLKmode MEM means all memory is clobbered. */
3547 if (GET_MODE (x) == BLKmode)
3548 {
3549 if (MEM_READONLY_P (x))
3550 loop_info->unknown_constant_address_altered = 1;
3551 else
3552 loop_info->unknown_address_altered = 1;
3553
3554 return;
3555 }
3556
3557 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3558 loop_info->store_mems);
3559 }
3560
3561 /* X is a value modified by an INSN that references a biv inside a loop
3562 exit test (i.e., X is somehow related to the value of the biv). If X
3563 is a pseudo that is used more than once, then the biv is (effectively)
3564 used more than once. DATA is a pointer to a loop_regs structure. */
3565
3566 static void
3567 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3568 {
3569 struct loop_regs *regs = (struct loop_regs *) data;
3570
3571 if (x == 0)
3572 return;
3573
3574 while (GET_CODE (x) == STRICT_LOW_PART
3575 || GET_CODE (x) == SIGN_EXTRACT
3576 || GET_CODE (x) == ZERO_EXTRACT
3577 || GET_CODE (x) == SUBREG)
3578 x = XEXP (x, 0);
3579
3580 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3581 return;
3582
3583 /* If we do not have usage information, or if we know the register
3584 is used more than once, note that fact for check_dbra_loop. */
3585 if (REGNO (x) >= max_reg_before_loop
3586 || ! regs->array[REGNO (x)].single_usage
3587 || regs->array[REGNO (x)].single_usage == const0_rtx)
3588 regs->multiple_uses = 1;
3589 }
3590 \f
3591 /* Return nonzero if the rtx X is invariant over the current loop.
3592
3593 The value is 2 if we refer to something only conditionally invariant.
3594
3595 A memory ref is invariant if it is not volatile and does not conflict
3596 with anything stored in `loop_info->store_mems'. */
3597
3598 static int
3599 loop_invariant_p (const struct loop *loop, rtx x)
3600 {
3601 struct loop_info *loop_info = LOOP_INFO (loop);
3602 struct loop_regs *regs = LOOP_REGS (loop);
3603 int i;
3604 enum rtx_code code;
3605 const char *fmt;
3606 int conditional = 0;
3607 rtx mem_list_entry;
3608
3609 if (x == 0)
3610 return 1;
3611 code = GET_CODE (x);
3612 switch (code)
3613 {
3614 case CONST_INT:
3615 case CONST_DOUBLE:
3616 case SYMBOL_REF:
3617 case CONST:
3618 return 1;
3619
3620 case LABEL_REF:
3621 return 1;
3622
3623 case PC:
3624 case CC0:
3625 case UNSPEC_VOLATILE:
3626 return 0;
3627
3628 case REG:
3629 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3630 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3631 && ! current_function_has_nonlocal_goto)
3632 return 1;
3633
3634 if (LOOP_INFO (loop)->has_call
3635 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3636 return 0;
3637
3638 /* Out-of-range regs can occur when we are called from unrolling.
3639 These registers created by the unroller are set in the loop,
3640 hence are never invariant.
3641 Other out-of-range regs can be generated by load_mems; those that
3642 are written to in the loop are not invariant, while those that are
3643 not written to are invariant. It would be easy for load_mems
3644 to set n_times_set correctly for these registers; however, there
3645 is no easy way to distinguish them from registers created by the
3646 unroller. */
3647
3648 if (REGNO (x) >= (unsigned) regs->num)
3649 return 0;
3650
3651 if (regs->array[REGNO (x)].set_in_loop < 0)
3652 return 2;
3653
3654 return regs->array[REGNO (x)].set_in_loop == 0;
3655
3656 case MEM:
3657 /* Volatile memory references must be rejected. Do this before
3658 checking for read-only items, so that volatile read-only items
3659 will be rejected also. */
3660 if (MEM_VOLATILE_P (x))
3661 return 0;
3662
3663 /* See if there is any dependence between a store and this load. */
3664 mem_list_entry = loop_info->store_mems;
3665 while (mem_list_entry)
3666 {
3667 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3668 x, rtx_varies_p))
3669 return 0;
3670
3671 mem_list_entry = XEXP (mem_list_entry, 1);
3672 }
3673
3674 /* It's not invalidated by a store in memory
3675 but we must still verify the address is invariant. */
3676 break;
3677
3678 case ASM_OPERANDS:
3679 /* Don't mess with insns declared volatile. */
3680 if (MEM_VOLATILE_P (x))
3681 return 0;
3682 break;
3683
3684 default:
3685 break;
3686 }
3687
3688 fmt = GET_RTX_FORMAT (code);
3689 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3690 {
3691 if (fmt[i] == 'e')
3692 {
3693 int tem = loop_invariant_p (loop, XEXP (x, i));
3694 if (tem == 0)
3695 return 0;
3696 if (tem == 2)
3697 conditional = 1;
3698 }
3699 else if (fmt[i] == 'E')
3700 {
3701 int j;
3702 for (j = 0; j < XVECLEN (x, i); j++)
3703 {
3704 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3705 if (tem == 0)
3706 return 0;
3707 if (tem == 2)
3708 conditional = 1;
3709 }
3710
3711 }
3712 }
3713
3714 return 1 + conditional;
3715 }
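/* Illustrative sketch only (not part of the pass): the kind of source-level
   expression the predicate above is meant to recognize.  The sum
   `scale + bias' never changes inside the loop, so loop_invariant_p would
   report it invariant (assuming no conflicting store is recorded in
   loop_info->store_mems), which is what lets the movables code hoist it.  */
#if 0
static void
example_invariant_operand (int *a, int n, int scale, int bias)
{
  int i;

  for (i = 0; i < n; i++)
    a[i] = a[i] * (scale + bias);	/* (scale + bias) is loop-invariant.  */
}
#endif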
3716 \f
3717 /* Return nonzero if all the insns in the loop that set REG
3718 are INSN and the immediately following insns,
3719 and if each of those insns sets REG in an invariant way
3720 (not counting uses of REG in them).
3721
3722 The value is 2 if some of these insns are only conditionally invariant.
3723
3724 We assume that INSN itself is the first set of REG
3725 and that its source is invariant. */
3726
3727 static int
3728 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3729 rtx insn)
3730 {
3731 struct loop_regs *regs = LOOP_REGS (loop);
3732 rtx p = insn;
3733 unsigned int regno = REGNO (reg);
3734 rtx temp;
3735 /* Number of sets we have to insist on finding after INSN. */
3736 int count = n_sets - 1;
3737 int old = regs->array[regno].set_in_loop;
3738 int value = 0;
3739 int this;
3740
3741 /* If N_SETS hit the limit, we can't rely on its value. */
3742 if (n_sets == 127)
3743 return 0;
3744
3745 regs->array[regno].set_in_loop = 0;
3746
3747 while (count > 0)
3748 {
3749 enum rtx_code code;
3750 rtx set;
3751
3752 p = NEXT_INSN (p);
3753 code = GET_CODE (p);
3754
3755 /* If library call, skip to end of it. */
3756 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3757 p = XEXP (temp, 0);
3758
3759 this = 0;
3760 if (code == INSN
3761 && (set = single_set (p))
3762 && REG_P (SET_DEST (set))
3763 && REGNO (SET_DEST (set)) == regno)
3764 {
3765 this = loop_invariant_p (loop, SET_SRC (set));
3766 if (this != 0)
3767 value |= this;
3768 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3769 {
3770 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3771 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3772 notes are OK. */
3773 this = (CONSTANT_P (XEXP (temp, 0))
3774 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3775 && loop_invariant_p (loop, XEXP (temp, 0))));
3776 if (this != 0)
3777 value |= this;
3778 }
3779 }
3780 if (this != 0)
3781 count--;
3782 else if (code != NOTE)
3783 {
3784 regs->array[regno].set_in_loop = old;
3785 return 0;
3786 }
3787 }
3788
3789 regs->array[regno].set_in_loop = old;
3790 /* If loop_invariant_p ever returned 2, we return 2. */
3791 return 1 + (value & 2);
3792 }
3793 \f
3794 /* Look at all uses (not sets) of registers in X. For each, if it is
3795 the single use, set REGS->array[REGNO].single_usage to INSN; if there
3796 was a previous use in a different insn, set it to const0_rtx. */
3797
3798 static void
3799 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3800 {
3801 enum rtx_code code = GET_CODE (x);
3802 const char *fmt = GET_RTX_FORMAT (code);
3803 int i, j;
3804
3805 if (code == REG)
3806 regs->array[REGNO (x)].single_usage
3807 = (regs->array[REGNO (x)].single_usage != 0
3808 && regs->array[REGNO (x)].single_usage != insn)
3809 ? const0_rtx : insn;
3810
3811 else if (code == SET)
3812 {
3813 /* Don't count SET_DEST if it is a REG; otherwise count things
3814 in SET_DEST because if a register is partially modified, it won't
3815 show up as a potential movable so we don't care how USAGE is set
3816 for it. */
3817 if (!REG_P (SET_DEST (x)))
3818 find_single_use_in_loop (regs, insn, SET_DEST (x));
3819 find_single_use_in_loop (regs, insn, SET_SRC (x));
3820 }
3821 else
3822 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3823 {
3824 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3825 find_single_use_in_loop (regs, insn, XEXP (x, i));
3826 else if (fmt[i] == 'E')
3827 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3828 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3829 }
3830 }
3831 \f
3832 /* Count and record any set in X which is contained in INSN. Update
3833 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3834 in X. */
3835
3836 static void
3837 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3838 {
3839 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3840 /* Don't move a reg that has an explicit clobber.
3841 It's not worth the pain to try to do it correctly. */
3842 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3843
3844 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3845 {
3846 rtx dest = SET_DEST (x);
3847 while (GET_CODE (dest) == SUBREG
3848 || GET_CODE (dest) == ZERO_EXTRACT
3849 || GET_CODE (dest) == STRICT_LOW_PART)
3850 dest = XEXP (dest, 0);
3851 if (REG_P (dest))
3852 {
3853 int i;
3854 int regno = REGNO (dest);
3855 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3856 {
3857 /* If this is the first setting of this reg
3858 in current basic block, and it was set before,
3859 it must be set in two basic blocks, so it cannot
3860 be moved out of the loop. */
3861 if (regs->array[regno].set_in_loop > 0
3862 && last_set[regno] == 0)
3863 regs->array[regno+i].may_not_optimize = 1;
3864 /* If this is not first setting in current basic block,
3865 see if reg was used in between previous one and this.
3866 If so, neither one can be moved. */
3867 if (last_set[regno] != 0
3868 && reg_used_between_p (dest, last_set[regno], insn))
3869 regs->array[regno+i].may_not_optimize = 1;
3870 if (regs->array[regno+i].set_in_loop < 127)
3871 ++regs->array[regno+i].set_in_loop;
3872 last_set[regno+i] = insn;
3873 }
3874 }
3875 }
3876 }
3877 \f
3878 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3879 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3880 contained in insn INSN is used by any insn that precedes INSN in
3881 cyclic order starting from the loop entry point.
3882
3883 We don't want to use INSN_LUID here because if we restrict INSN to those
3884 that have a valid INSN_LUID, it means we cannot move an invariant out
3885 from an inner loop past two loops. */
3886
3887 static int
3888 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3889 {
3890 rtx reg = SET_DEST (set);
3891 rtx p;
3892
3893 /* Scan forward checking for register usage. If we hit INSN, we
3894 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3895 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3896 {
3897 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3898 return 1;
3899
3900 if (p == loop->end)
3901 p = loop->start;
3902 }
3903
3904 return 0;
3905 }
3906 \f
3907
3908 /* Information we collect about arrays that we might want to prefetch. */
3909 struct prefetch_info
3910 {
3911 struct iv_class *class; /* Class this prefetch is based on. */
3912 struct induction *giv; /* GIV this prefetch is based on. */
3913 rtx base_address; /* Start prefetching from this address plus
3914 index. */
3915 HOST_WIDE_INT index;
3916 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3917 iteration. */
3918 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3919 prefetch area in one iteration. */
3920 unsigned int total_bytes; /* Total bytes loop will access in this block.
3921 This is set only for loops with known
3922 iteration counts and is 0xffffffff
3923 otherwise. */
3924 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3925 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3926 unsigned int write : 1; /* 1 for read/write prefetches. */
3927 };
3928
3929 /* Data used by check_store function. */
3930 struct check_store_data
3931 {
3932 rtx mem_address;
3933 int mem_write;
3934 };
3935
3936 static void check_store (rtx, rtx, void *);
3937 static void emit_prefetch_instructions (struct loop *);
3938 static int rtx_equal_for_prefetch_p (rtx, rtx);
3939
3940 /* Set mem_write when mem_address is found. Used as callback to
3941 note_stores. */
3942 static void
3943 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3944 {
3945 struct check_store_data *d = (struct check_store_data *) data;
3946
3947 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3948 d->mem_write = 1;
3949 }
3950 \f
3951 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3952 important to get some addresses combined. Later more sophisticated
3953 transformations can be added when necessary.
3954
3955 ??? The same trick with swapping operands is done at several other places.
3956 It would be nice to develop some common way to handle this. */
3957
3958 static int
3959 rtx_equal_for_prefetch_p (rtx x, rtx y)
3960 {
3961 int i;
3962 int j;
3963 enum rtx_code code = GET_CODE (x);
3964 const char *fmt;
3965
3966 if (x == y)
3967 return 1;
3968 if (code != GET_CODE (y))
3969 return 0;
3970
3971 if (GET_MODE (x) != GET_MODE (y))
3972 return 0;
3973
3974 switch (code)
3975 {
3976 case PC:
3977 case CC0:
3978 case CONST_INT:
3979 case CONST_DOUBLE:
3980 return 0;
3981
3982 case LABEL_REF:
3983 return XEXP (x, 0) == XEXP (y, 0);
3984
3985 default:
3986 break;
3987 }
3988
3989 if (COMMUTATIVE_ARITH_P (x))
3990 {
3991 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3992 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3993 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3994 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3995 }
3996
3997 /* Compare the elements. If any pair of corresponding elements fails to
3998 match, return 0 for the whole thing. */
3999
4000 fmt = GET_RTX_FORMAT (code);
4001 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4002 {
4003 switch (fmt[i])
4004 {
4005 case 'w':
4006 if (XWINT (x, i) != XWINT (y, i))
4007 return 0;
4008 break;
4009
4010 case 'i':
4011 if (XINT (x, i) != XINT (y, i))
4012 return 0;
4013 break;
4014
4015 case 'E':
4016 /* Two vectors must have the same length. */
4017 if (XVECLEN (x, i) != XVECLEN (y, i))
4018 return 0;
4019
4020 /* And the corresponding elements must match. */
4021 for (j = 0; j < XVECLEN (x, i); j++)
4022 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4023 XVECEXP (y, i, j)) == 0)
4024 return 0;
4025 break;
4026
4027 case 'e':
4028 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4029 return 0;
4030 break;
4031
4032 case 's':
4033 if (strcmp (XSTR (x, i), XSTR (y, i)))
4034 return 0;
4035 break;
4036
4037 case 'u':
4038 /* These are just backpointers, so they don't matter. */
4039 break;
4040
4041 case '0':
4042 break;
4043
4044 /* It is believed that rtx's at this level will never
4045 contain anything but integers and other rtx's,
4046 except for within LABEL_REFs and SYMBOL_REFs. */
4047 default:
4048 gcc_unreachable ();
4049 }
4050 }
4051 return 1;
4052 }
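/* Minimal sketch only, using the usual rtl constructors: the two addresses
   below differ solely in operand order, so plain rtx_equal_p reports them
   different, while rtx_equal_for_prefetch_p treats them as the same access
   and lets the corresponding prefetches be combined.  */
#if 0
static void
example_commutative_prefetch_match (void)
{
  rtx reg = gen_reg_rtx (Pmode);
  rtx a = gen_rtx_PLUS (Pmode, reg, GEN_INT (4));
  rtx b = gen_rtx_PLUS (Pmode, GEN_INT (4), reg);

  /* rtx_equal_p compares operands positionally and fails here.  */
  gcc_assert (! rtx_equal_p (a, b));
  gcc_assert (rtx_equal_for_prefetch_p (a, b));
}
#endif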
4053 \f
4054 /* Remove constant addition value from the expression X (when present)
4055 and return it. */
4056
4057 static HOST_WIDE_INT
4058 remove_constant_addition (rtx *x)
4059 {
4060 HOST_WIDE_INT addval = 0;
4061 rtx exp = *x;
4062
4063 /* Avoid clobbering a shared CONST expression. */
4064 if (GET_CODE (exp) == CONST)
4065 {
4066 if (GET_CODE (XEXP (exp, 0)) == PLUS
4067 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4068 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4069 {
4070 *x = XEXP (XEXP (exp, 0), 0);
4071 return INTVAL (XEXP (XEXP (exp, 0), 1));
4072 }
4073 return 0;
4074 }
4075
4076 if (GET_CODE (exp) == CONST_INT)
4077 {
4078 addval = INTVAL (exp);
4079 *x = const0_rtx;
4080 }
4081
4082 /* For plus expression recurse on ourself. */
4083 else if (GET_CODE (exp) == PLUS)
4084 {
4085 addval += remove_constant_addition (&XEXP (exp, 0));
4086 addval += remove_constant_addition (&XEXP (exp, 1));
4087
4088 /* In case our parameter was constant, remove extra zero from the
4089 expression. */
4090 if (XEXP (exp, 0) == const0_rtx)
4091 *x = XEXP (exp, 1);
4092 else if (XEXP (exp, 1) == const0_rtx)
4093 *x = XEXP (exp, 0);
4094 }
4095
4096 return addval;
4097 }
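/* Worked sketch only (hypothetical values): for (plus (reg) (const_int 12))
   the function returns 12 and rewrites the expression to the bare register;
   for a lone (const_int 8) it returns 8 and leaves const0_rtx behind.  */
#if 0
static void
example_remove_constant_addition (void)
{
  rtx reg = gen_reg_rtx (Pmode);
  rtx addr = gen_rtx_PLUS (Pmode, reg, GEN_INT (12));
  HOST_WIDE_INT c = remove_constant_addition (&addr);

  gcc_assert (c == 12 && addr == reg);
}
#endif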
4098
4099 /* Attempt to identify accesses to arrays that are most likely to cause cache
4100 misses, and emit prefetch instructions a few prefetch blocks forward.
4101
4102 To detect the arrays we use the GIV information that was collected by the
4103 strength reduction pass.
4104
4105 The prefetch instructions are generated after the GIV information is done
4106 and before the strength reduction process. The new GIVs are injected into
4107 the strength reduction tables, so the prefetch addresses are optimized as
4108 well.
4109
4110 GIVs are split into base address, stride, and constant addition values.
4111 GIVs with the same address, stride and close addition values are combined
4112 into a single prefetch. Also writes to GIVs are detected, so that prefetch
4113 for write instructions can be used for the block we write to, on machines
4114 that support write prefetches.
4115
4116 Several heuristics are used to determine when to prefetch. They are
4117 controlled by defined symbols that can be overridden for each target. */
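/* Conceptual sketch only, not part of the implementation: for a loop such as
   `for (i = 0; i < n; i++) sum += a[i];' the address giv for a[i] splits into
   base address `a', stride `sizeof (*a)' per iteration, and constant addition
   0.  Prefetching a few blocks ahead corresponds roughly to the source-level
   transformation below; 64 is an assumed prefetch block size and
   __builtin_prefetch is used only to make the idea concrete.  */
#if 0
static int
example_prefetched_sum (const int *a, int n)
{
  int i, sum = 0;

  for (i = 0; i < n; i++)
    {
      __builtin_prefetch (&a[i] + 64 / sizeof (*a), 0, 3);
      sum += a[i];
    }
  return sum;
}
#endif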
4118
4119 static void
4120 emit_prefetch_instructions (struct loop *loop)
4121 {
4122 int num_prefetches = 0;
4123 int num_real_prefetches = 0;
4124 int num_real_write_prefetches = 0;
4125 int num_prefetches_before = 0;
4126 int num_write_prefetches_before = 0;
4127 int ahead = 0;
4128 int i;
4129 struct iv_class *bl;
4130 struct induction *iv;
4131 struct prefetch_info info[MAX_PREFETCHES];
4132 struct loop_ivs *ivs = LOOP_IVS (loop);
4133
4134 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4135 return;
4136
4137 /* Consider only loops w/o calls. When a call is present, the loop is
4138 probably slow enough to hide the memory latency anyway. */
4139 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4140 {
4141 if (loop_dump_stream)
4142 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4143
4144 return;
4145 }
4146
4147 /* Don't prefetch in loops known to have few iterations. */
4148 if (PREFETCH_NO_LOW_LOOPCNT
4149 && LOOP_INFO (loop)->n_iterations
4150 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4151 {
4152 if (loop_dump_stream)
4153 fprintf (loop_dump_stream,
4154 "Prefetch: ignoring loop: not enough iterations.\n");
4155 return;
4156 }
4157
4158 /* Search all induction variables and pick those interesting for the prefetch
4159 machinery. */
4160 for (bl = ivs->list; bl; bl = bl->next)
4161 {
4162 struct induction *biv = bl->biv, *biv1;
4163 int basestride = 0;
4164
4165 biv1 = biv;
4166
4167 /* Expect all BIVs to be executed in each iteration. This makes our
4168 analysis more conservative. */
4169 while (biv1)
4170 {
4171 /* Discard non-constant additions that we can't handle well yet, and
4172 BIVs that are executed multiple times; such BIVs ought to be
4173 handled in the nested loop. We accept not_every_iteration BIVs,
4174 since these only result in larger strides and make our
4175 heuristics more conservative. */
4176 if (GET_CODE (biv->add_val) != CONST_INT)
4177 {
4178 if (loop_dump_stream)
4179 {
4180 fprintf (loop_dump_stream,
4181 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4182 REGNO (biv->src_reg), INSN_UID (biv->insn));
4183 print_rtl (loop_dump_stream, biv->add_val);
4184 fprintf (loop_dump_stream, "\n");
4185 }
4186 break;
4187 }
4188
4189 if (biv->maybe_multiple)
4190 {
4191 if (loop_dump_stream)
4192 {
4193 fprintf (loop_dump_stream,
4194 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4195 REGNO (biv->src_reg), INSN_UID (biv->insn));
4196 print_rtl (loop_dump_stream, biv->add_val);
4197 fprintf (loop_dump_stream, "\n");
4198 }
4199 break;
4200 }
4201
4202 basestride += INTVAL (biv1->add_val);
4203 biv1 = biv1->next_iv;
4204 }
4205
4206 if (biv1 || !basestride)
4207 continue;
4208
4209 for (iv = bl->giv; iv; iv = iv->next_iv)
4210 {
4211 rtx address;
4212 rtx temp;
4213 HOST_WIDE_INT index = 0;
4214 int add = 1;
4215 HOST_WIDE_INT stride = 0;
4216 int stride_sign = 1;
4217 struct check_store_data d;
4218 const char *ignore_reason = NULL;
4219 int size = GET_MODE_SIZE (GET_MODE (iv));
4220
4221 /* See whether an induction variable is interesting to us and if
4222 not, report the reason. */
4223 if (iv->giv_type != DEST_ADDR)
4224 ignore_reason = "giv is not a destination address";
4225
4226 /* We are interested only in constant stride memory references
4227 in order to be able to compute density easily. */
4228 else if (GET_CODE (iv->mult_val) != CONST_INT)
4229 ignore_reason = "stride is not constant";
4230
4231 else
4232 {
4233 stride = INTVAL (iv->mult_val) * basestride;
4234 if (stride < 0)
4235 {
4236 stride = -stride;
4237 stride_sign = -1;
4238 }
4239
4240 /* On some targets, reversed order prefetches are not
4241 worthwhile. */
4242 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4243 ignore_reason = "reversed order stride";
4244
4245 /* Prefetch of accesses with an extreme stride might not be
4246 worthwhile, either. */
4247 else if (PREFETCH_NO_EXTREME_STRIDE
4248 && stride > PREFETCH_EXTREME_STRIDE)
4249 ignore_reason = "extreme stride";
4250
4251 /* Ignore GIVs with varying add values; we can't predict the
4252 value for the next iteration. */
4253 else if (!loop_invariant_p (loop, iv->add_val))
4254 ignore_reason = "giv has varying add value";
4255
4256 /* Ignore GIVs in the nested loops; they ought to have been
4257 handled already. */
4258 else if (iv->maybe_multiple)
4259 ignore_reason = "giv is in nested loop";
4260 }
4261
4262 if (ignore_reason != NULL)
4263 {
4264 if (loop_dump_stream)
4265 fprintf (loop_dump_stream,
4266 "Prefetch: ignoring giv at %d: %s.\n",
4267 INSN_UID (iv->insn), ignore_reason);
4268 continue;
4269 }
4270
4271 /* Determine the pointer to the basic array we are examining. It is
4272 the sum of the BIV's initial value and the GIV's add_val. */
4273 address = copy_rtx (iv->add_val);
4274 temp = copy_rtx (bl->initial_value);
4275
4276 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4277 index = remove_constant_addition (&address);
4278
4279 d.mem_write = 0;
4280 d.mem_address = *iv->location;
4281
4282 /* When the GIV is not always executed, we might be better off by
4283 not dirtying the cache pages. */
4284 if (PREFETCH_CONDITIONAL || iv->always_executed)
4285 note_stores (PATTERN (iv->insn), check_store, &d);
4286 else
4287 {
4288 if (loop_dump_stream)
4289 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4290 INSN_UID (iv->insn), "in conditional code.");
4291 continue;
4292 }
4293
4294 /* Attempt to find another prefetch to the same array and see if we
4295 can merge this one. */
4296 for (i = 0; i < num_prefetches; i++)
4297 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4298 && stride == info[i].stride)
4299 {
4300 /* In case both access the same array (same location,
4301 just with a small difference in constant indexes), merge
4302 the prefetches. Just do the later one and the earlier one
4303 will get prefetched from the previous iteration.
4304 The artificial threshold should not be too small,
4305 but also not bigger than the small portion of memory
4306 usually traversed by a single loop. */
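/* Worked example (illustrative): two givs reading a[i] and a[i + 2] share
   the same base address and stride, so they merge into one prefetch stream;
   the larger constant index is kept and the earlier access is covered by
   the prefetch issued on the previous iteration.  */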
4307 if (index >= info[i].index
4308 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4309 {
4310 info[i].write |= d.mem_write;
4311 info[i].bytes_accessed += size;
4312 info[i].index = index;
4313 info[i].giv = iv;
4314 info[i].class = bl;
4315 info[num_prefetches].base_address = address;
4316 add = 0;
4317 break;
4318 }
4319
4320 if (index < info[i].index
4321 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4322 {
4323 info[i].write |= d.mem_write;
4324 info[i].bytes_accessed += size;
4325 add = 0;
4326 break;
4327 }
4328 }
4329
4330 /* Merging failed. */
4331 if (add)
4332 {
4333 info[num_prefetches].giv = iv;
4334 info[num_prefetches].class = bl;
4335 info[num_prefetches].index = index;
4336 info[num_prefetches].stride = stride;
4337 info[num_prefetches].base_address = address;
4338 info[num_prefetches].write = d.mem_write;
4339 info[num_prefetches].bytes_accessed = size;
4340 num_prefetches++;
4341 if (num_prefetches >= MAX_PREFETCHES)
4342 {
4343 if (loop_dump_stream)
4344 fprintf (loop_dump_stream,
4345 "Maximal number of prefetches exceeded.\n");
4346 return;
4347 }
4348 }
4349 }
4350 }
4351
4352 for (i = 0; i < num_prefetches; i++)
4353 {
4354 int density;
4355
4356 /* Attempt to calculate the total number of bytes fetched by all
4357 iterations of the loop. Avoid overflow. */
4358 if (LOOP_INFO (loop)->n_iterations
4359 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4360 >= LOOP_INFO (loop)->n_iterations))
4361 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4362 else
4363 info[i].total_bytes = 0xffffffff;
4364
4365 density = info[i].bytes_accessed * 100 / info[i].stride;
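/* Worked example (illustrative): accessing 4 bytes per iteration with a
   16-byte stride gives density = 4 * 100 / 16 = 25 (percent).  */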
4366
4367 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4368 if (PREFETCH_ONLY_DENSE_MEM)
4369 if (density * 256 > PREFETCH_DENSE_MEM * 100
4370 && (info[i].total_bytes / PREFETCH_BLOCK
4371 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4372 {
4373 info[i].prefetch_before_loop = 1;
4374 info[i].prefetch_in_loop
4375 = (info[i].total_bytes / PREFETCH_BLOCK
4376 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4377 }
4378 else
4379 {
4380 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4381 if (loop_dump_stream)
4382 fprintf (loop_dump_stream,
4383 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4384 INSN_UID (info[i].giv->insn), density);
4385 }
4386 else
4387 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4388
4389 /* Find how many prefetch instructions we'll use within the loop. */
4390 if (info[i].prefetch_in_loop != 0)
4391 {
4392 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4393 / PREFETCH_BLOCK);
4394 num_real_prefetches += info[i].prefetch_in_loop;
4395 if (info[i].write)
4396 num_real_write_prefetches += info[i].prefetch_in_loop;
4397 }
4398 }
4399
4400 /* Determine how many iterations ahead to prefetch within the loop, based
4401 on how many prefetches we currently expect to do within the loop. */
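/* Worked example (hypothetical numbers): with SIMULTANEOUS_PREFETCHES == 8
   and 3 prefetch insns planned per iteration, AHEAD = 8 / 3 = 2; if more
   prefetches are planned than the target can sustain, AHEAD becomes 0 and
   in-loop prefetching is abandoned below.  */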
4402 if (num_real_prefetches != 0)
4403 {
4404 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4405 {
4406 if (loop_dump_stream)
4407 fprintf (loop_dump_stream,
4408 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4409 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4410 num_real_prefetches = 0, num_real_write_prefetches = 0;
4411 }
4412 }
4413 /* We'll also use AHEAD to determine how many prefetch instructions to
4414 emit before a loop, so don't leave it zero. */
4415 if (ahead == 0)
4416 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4417
4418 for (i = 0; i < num_prefetches; i++)
4419 {
4420 /* Update if we've decided not to prefetch anything within the loop. */
4421 if (num_real_prefetches == 0)
4422 info[i].prefetch_in_loop = 0;
4423
4424 /* Find how many prefetch instructions we'll use before the loop. */
4425 if (info[i].prefetch_before_loop != 0)
4426 {
4427 int n = info[i].total_bytes / PREFETCH_BLOCK;
4428 if (n > ahead)
4429 n = ahead;
4430 info[i].prefetch_before_loop = n;
4431 num_prefetches_before += n;
4432 if (info[i].write)
4433 num_write_prefetches_before += n;
4434 }
4435
4436 if (loop_dump_stream)
4437 {
4438 if (info[i].prefetch_in_loop == 0
4439 && info[i].prefetch_before_loop == 0)
4440 continue;
4441 fprintf (loop_dump_stream, "Prefetch insn: %d",
4442 INSN_UID (info[i].giv->insn));
4443 fprintf (loop_dump_stream,
4444 "; in loop: %d; before: %d; %s\n",
4445 info[i].prefetch_in_loop,
4446 info[i].prefetch_before_loop,
4447 info[i].write ? "read/write" : "read only");
4448 fprintf (loop_dump_stream,
4449 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4450 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4451 info[i].bytes_accessed, info[i].total_bytes);
4452 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4453 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4454 info[i].index, info[i].stride);
4455 print_rtl (loop_dump_stream, info[i].base_address);
4456 fprintf (loop_dump_stream, "\n");
4457 }
4458 }
4459
4460 if (num_real_prefetches + num_prefetches_before > 0)
4461 {
4462 /* Record that this loop uses prefetch instructions. */
4463 LOOP_INFO (loop)->has_prefetch = 1;
4464
4465 if (loop_dump_stream)
4466 {
4467 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4468 num_real_prefetches, num_real_write_prefetches);
4469 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4470 num_prefetches_before, num_write_prefetches_before);
4471 }
4472 }
4473
4474 for (i = 0; i < num_prefetches; i++)
4475 {
4476 int y;
4477
4478 for (y = 0; y < info[i].prefetch_in_loop; y++)
4479 {
4480 rtx loc = copy_rtx (*info[i].giv->location);
4481 rtx insn;
4482 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4483 rtx before_insn = info[i].giv->insn;
4484 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4485 rtx seq;
4486
4487 /* We can save some effort by offsetting the address on
4488 architectures with offsettable memory references. */
4489 if (offsettable_address_p (0, VOIDmode, loc))
4490 loc = plus_constant (loc, bytes_ahead);
4491 else
4492 {
4493 rtx reg = gen_reg_rtx (Pmode);
4494 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4495 GEN_INT (bytes_ahead), reg,
4496 0, before_insn);
4497 loc = reg;
4498 }
4499
4500 start_sequence ();
4501 /* Make sure the address operand is valid for prefetch. */
4502 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4503 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4504 loc = force_reg (Pmode, loc);
4505 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4506 GEN_INT (3)));
4507 seq = get_insns ();
4508 end_sequence ();
4509 emit_insn_before (seq, before_insn);
4510
4511 /* Check all insns emitted and record the new GIV
4512 information. */
4513 insn = NEXT_INSN (prev_insn);
4514 while (insn != before_insn)
4515 {
4516 insn = check_insn_for_givs (loop, insn,
4517 info[i].giv->always_executed,
4518 info[i].giv->maybe_multiple);
4519 insn = NEXT_INSN (insn);
4520 }
4521 }
4522
4523 if (PREFETCH_BEFORE_LOOP)
4524 {
4525 /* Emit insns before the loop to fetch the first cache lines or,
4526 if we're not prefetching within the loop, everything we expect
4527 to need. */
4528 for (y = 0; y < info[i].prefetch_before_loop; y++)
4529 {
4530 rtx reg = gen_reg_rtx (Pmode);
4531 rtx loop_start = loop->start;
4532 rtx init_val = info[i].class->initial_value;
4533 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4534 info[i].giv->add_val,
4535 GEN_INT (y * PREFETCH_BLOCK));
4536
4537 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4538 non-constant INIT_VAL to have the same mode as REG, which
4539 in this case we know to be Pmode. */
4540 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4541 {
4542 rtx seq;
4543
4544 start_sequence ();
4545 init_val = convert_to_mode (Pmode, init_val, 0);
4546 seq = get_insns ();
4547 end_sequence ();
4548 loop_insn_emit_before (loop, 0, loop_start, seq);
4549 }
4550 loop_iv_add_mult_emit_before (loop, init_val,
4551 info[i].giv->mult_val,
4552 add_val, reg, 0, loop_start);
4553 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4554 GEN_INT (3)),
4555 loop_start);
4556 }
4557 }
4558 }
4559
4560 return;
4561 }
4562 \f
4563 /* Communication with routines called via `note_stores'. */
4564
4565 static rtx note_insn;
4566
4567 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4568
4569 static rtx addr_placeholder;
4570
4571 /* ??? Unfinished optimizations, and possible future optimizations,
4572 for the strength reduction code. */
4573
4574 /* ??? The interaction of biv elimination, and recognition of 'constant'
4575 bivs, may cause problems. */
4576
4577 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4578 performance problems.
4579
4580 Perhaps don't eliminate things that can be combined with an addressing
4581 mode. Find all givs that have the same biv, mult_val, and add_val;
4582 then for each giv, check to see if its only use dies in a following
4583 memory address. If so, generate a new memory address and check to see
4584 if it is valid. If it is valid, then store the modified memory address,
4585 otherwise, mark the giv as not done so that it will get its own iv. */
4586
4587 /* ??? Could try to optimize branches when it is known that a biv is always
4588 positive. */
4589
4590 /* ??? When replacing a biv in a compare insn, we should replace it with the
4591 closest giv so that an optimized branch can still be recognized by the
4592 combiner, e.g. the VAX acb insn. */
4593
4594 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4595 was rerun in loop_optimize whenever a register was added or moved.
4596 Also, some of the optimizations could be a little less conservative. */
4597 \f
4598 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4599 is a backward branch in that range that branches to somewhere between
4600 LOOP->START and INSN. Returns 0 otherwise. */
4601
4602 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4603 In practice, this is not a problem, because this function is seldom called,
4604 and uses a negligible amount of CPU time on average. */
4605
4606 static int
4607 back_branch_in_range_p (const struct loop *loop, rtx insn)
4608 {
4609 rtx p, q, target_insn;
4610 rtx loop_start = loop->start;
4611 rtx loop_end = loop->end;
4612 rtx orig_loop_end = loop->end;
4613
4614 /* Stop before we get to the backward branch at the end of the loop. */
4615 loop_end = prev_nonnote_insn (loop_end);
4616 if (BARRIER_P (loop_end))
4617 loop_end = PREV_INSN (loop_end);
4618
4619 /* In case INSN has been deleted, search forward for the first
4620 non-deleted insn following it. */
4621 while (INSN_DELETED_P (insn))
4622 insn = NEXT_INSN (insn);
4623
4624 /* Check for the case where insn is the last insn in the loop. Deal
4625 with the case where INSN was a deleted loop test insn, in which case
4626 it will now be the NOTE_LOOP_END. */
4627 if (insn == loop_end || insn == orig_loop_end)
4628 return 0;
4629
4630 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4631 {
4632 if (JUMP_P (p))
4633 {
4634 target_insn = JUMP_LABEL (p);
4635
4636 /* Search from loop_start to insn, to see if one of them is
4637 the target_insn. We can't use INSN_LUID comparisons here,
4638 since insn may not have an LUID entry. */
4639 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4640 if (q == target_insn)
4641 return 1;
4642 }
4643 }
4644
4645 return 0;
4646 }
4647
4648 /* Scan the loop body and call FNCALL for each insn. In addition to the
4649 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4650 callback.
4651
4652 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4653 least once for every loop iteration except for the last one.
4654
4655 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4656 loop iteration.
4657 */
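/* Illustrative example: in a body like

       for (i = 0; i < n; i++)
         {
           if (a[i] < 0)
             continue;		/* conditional jump within the loop */
           b[i] = a[i];		/* NOT_EVERY_ITERATION is 1 here */
         }

   the store to b[i] is skipped on some iterations, so NOT_EVERY_ITERATION is
   set for it; insns following a label that a later jump inside the loop can
   branch back to get MAYBE_MULTIPLE set instead.  */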
4658 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4659 static void
4660 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4661 {
4662 int not_every_iteration = 0;
4663 int maybe_multiple = 0;
4664 int past_loop_latch = 0;
4665 bool exit_test_is_entry = false;
4666 rtx p;
4667
4668 /* If loop_scan_start points to the loop exit test, the loop body
4669 cannot be counted on running on every iteration, and we have to
4670 be wary of subversive use of gotos inside expression
4671 statements. */
4672 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4673 {
4674 exit_test_is_entry = true;
4675 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4676 }
4677
4678 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4679 for (p = next_insn_in_loop (loop, loop->scan_start);
4680 p != NULL_RTX;
4681 p = next_insn_in_loop (loop, p))
4682 {
4683 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4684
4685 /* Past CODE_LABEL, we get to insns that may be executed multiple
4686 times. The only way we can be sure that they can't is if every
4687 jump insn between here and the end of the loop either
4688 returns, exits the loop, is a jump to a location that is still
4689 behind the label, or is a jump to the loop start. */
4690
4691 if (LABEL_P (p))
4692 {
4693 rtx insn = p;
4694
4695 maybe_multiple = 0;
4696
4697 while (1)
4698 {
4699 insn = NEXT_INSN (insn);
4700 if (insn == loop->scan_start)
4701 break;
4702 if (insn == loop->end)
4703 {
4704 if (loop->top != 0)
4705 insn = loop->top;
4706 else
4707 break;
4708 if (insn == loop->scan_start)
4709 break;
4710 }
4711
4712 if (JUMP_P (insn)
4713 && GET_CODE (PATTERN (insn)) != RETURN
4714 && (!any_condjump_p (insn)
4715 || (JUMP_LABEL (insn) != 0
4716 && JUMP_LABEL (insn) != loop->scan_start
4717 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4718 {
4719 maybe_multiple = 1;
4720 break;
4721 }
4722 }
4723 }
4724
4725 /* Past a jump, we get to insns for which we can't count
4726 on whether they will be executed during each iteration. */
4727 /* This code appears twice in strength_reduce. There is also similar
4728 code in scan_loop. */
4729 if (JUMP_P (p)
4730 /* If we enter the loop in the middle, and scan around to the
4731 beginning, don't set not_every_iteration for that.
4732 This can be any kind of jump, since we want to know if insns
4733 will be executed if the loop is executed. */
4734 && (exit_test_is_entry
4735 || !(JUMP_LABEL (p) == loop->top
4736 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4737 && any_uncondjump_p (p))
4738 || (NEXT_INSN (p) == loop->end
4739 && any_condjump_p (p))))))
4740 {
4741 rtx label = 0;
4742
4743 /* If this is a jump outside the loop, then it also doesn't
4744 matter. Check to see if the target of this branch is on the
4745 loop->exits_labels list. */
4746
4747 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4748 if (XEXP (label, 0) == JUMP_LABEL (p))
4749 break;
4750
4751 if (!label)
4752 not_every_iteration = 1;
4753 }
4754
4755 /* Note if we pass a loop latch. If we do, then we can not clear
4756 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4757 a loop since a jump before the last CODE_LABEL may have started
4758 a new loop iteration.
4759
4760 Note that LOOP_TOP is only set for rotated loops and we need
4761 this check for all loops, so compare against the CODE_LABEL
4762 which immediately follows LOOP_START. */
4763 if (JUMP_P (p)
4764 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4765 past_loop_latch = 1;
4766
4767 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4768 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4769 or not an insn is known to be executed each iteration of the
4770 loop, whether or not any iterations are known to occur.
4771
4772 Therefore, if we have just passed a label and have no more labels
4773 between here and the test insn of the loop, and we have not passed
4774 a jump to the top of the loop, then we know these insns will be
4775 executed each iteration. */
4776
4777 if (not_every_iteration
4778 && !past_loop_latch
4779 && LABEL_P (p)
4780 && no_labels_between_p (p, loop->end))
4781 not_every_iteration = 0;
4782 }
4783 }
4784 \f
4785 static void
4786 loop_bivs_find (struct loop *loop)
4787 {
4788 struct loop_regs *regs = LOOP_REGS (loop);
4789 struct loop_ivs *ivs = LOOP_IVS (loop);
4790 /* Temporary list pointers for traversing ivs->list. */
4791 struct iv_class *bl, **backbl;
4792
4793 ivs->list = 0;
4794
4795 for_each_insn_in_loop (loop, check_insn_for_bivs);
4796
4797 /* Scan ivs->list to remove all regs that proved not to be bivs.
4798 Make a sanity check against regs->n_times_set. */
4799 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4800 {
4801 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4802 /* Above happens if register modified by subreg, etc. */
4803 /* Make sure it is not recognized as a basic induction var: */
4804 || regs->array[bl->regno].n_times_set != bl->biv_count
4805 /* If never incremented, it is invariant that we decided not to
4806 move. So leave it alone. */
4807 || ! bl->incremented)
4808 {
4809 if (loop_dump_stream)
4810 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4811 bl->regno,
4812 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4813 ? "not induction variable"
4814 : (! bl->incremented ? "never incremented"
4815 : "count error")));
4816
4817 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4818 *backbl = bl->next;
4819 }
4820 else
4821 {
4822 backbl = &bl->next;
4823
4824 if (loop_dump_stream)
4825 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4826 }
4827 }
4828 }
4829
4830
4831 /* Determine how BIVS are initialized by looking through pre-header
4832 extended basic block. */
4833 static void
4834 loop_bivs_init_find (struct loop *loop)
4835 {
4836 struct loop_ivs *ivs = LOOP_IVS (loop);
4837 /* Temporary list pointers for traversing ivs->list. */
4838 struct iv_class *bl;
4839 int call_seen;
4840 rtx p;
4841
4842 /* Find initial value for each biv by searching backwards from loop_start,
4843 halting at first label. Also record any test condition. */
4844
4845 call_seen = 0;
4846 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4847 {
4848 rtx test;
4849
4850 note_insn = p;
4851
4852 if (CALL_P (p))
4853 call_seen = 1;
4854
4855 if (INSN_P (p))
4856 note_stores (PATTERN (p), record_initial, ivs);
4857
4858 /* Record any test of a biv that branches around the loop if no store
4859 between it and the start of loop. We only care about tests with
4860 constants and registers and only certain of those. */
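/* Worked example (illustrative): for code shaped like

       if (i != 10) goto past_loop;
       do { ... } while (...);
     past_loop:

   the NE test that branches around the loop implies the biv `i' equals 10
   on entry, which is recorded below as its initial value.  */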
4861 if (JUMP_P (p)
4862 && JUMP_LABEL (p) != 0
4863 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4864 && (test = get_condition_for_loop (loop, p)) != 0
4865 && REG_P (XEXP (test, 0))
4866 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4867 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4868 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4869 && bl->init_insn == 0)
4870 {
4871 /* If an NE test, we have an initial value! */
4872 if (GET_CODE (test) == NE)
4873 {
4874 bl->init_insn = p;
4875 bl->init_set = gen_rtx_SET (VOIDmode,
4876 XEXP (test, 0), XEXP (test, 1));
4877 }
4878 else
4879 bl->initial_test = test;
4880 }
4881 }
4882 }
4883
4884
4885 /* Look at each biv and see if we can say anything better about its
4886 initial value from any initializing insns set up above. (This is done
4887 in two passes to avoid missing SETs in a PARALLEL.) */
4888 static void
4889 loop_bivs_check (struct loop *loop)
4890 {
4891 struct loop_ivs *ivs = LOOP_IVS (loop);
4892 /* Temporary list pointers for traversing ivs->list. */
4893 struct iv_class *bl;
4894 struct iv_class **backbl;
4895
4896 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4897 {
4898 rtx src;
4899 rtx note;
4900
4901 if (! bl->init_insn)
4902 continue;
4903
4904 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4905 is a constant, use the value of that. */
4906 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4907 && CONSTANT_P (XEXP (note, 0)))
4908 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4909 && CONSTANT_P (XEXP (note, 0))))
4910 src = XEXP (note, 0);
4911 else
4912 src = SET_SRC (bl->init_set);
4913
4914 if (loop_dump_stream)
4915 fprintf (loop_dump_stream,
4916 "Biv %d: initialized at insn %d: initial value ",
4917 bl->regno, INSN_UID (bl->init_insn));
4918
4919 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4920 || GET_MODE (src) == VOIDmode)
4921 && valid_initial_value_p (src, bl->init_insn,
4922 LOOP_INFO (loop)->pre_header_has_call,
4923 loop->start))
4924 {
4925 bl->initial_value = src;
4926
4927 if (loop_dump_stream)
4928 {
4929 print_simple_rtl (loop_dump_stream, src);
4930 fputc ('\n', loop_dump_stream);
4931 }
4932 }
4933 /* If we can't make it a giv,
4934 let the biv keep its initial value of "itself". */
4935 else if (loop_dump_stream)
4936 fprintf (loop_dump_stream, "is complex\n");
4937 }
4938 }
4939
4940
4941 /* Search the loop for general induction variables. */
4942
4943 static void
4944 loop_givs_find (struct loop* loop)
4945 {
4946 for_each_insn_in_loop (loop, check_insn_for_givs);
4947 }
4948
4949
4950 /* For each giv for which we still don't know whether or not it is
4951 replaceable, check to see if it is replaceable because its final value
4952 can be calculated. */
4953
4954 static void
4955 loop_givs_check (struct loop *loop)
4956 {
4957 struct loop_ivs *ivs = LOOP_IVS (loop);
4958 struct iv_class *bl;
4959
4960 for (bl = ivs->list; bl; bl = bl->next)
4961 {
4962 struct induction *v;
4963
4964 for (v = bl->giv; v; v = v->next_iv)
4965 if (! v->replaceable && ! v->not_replaceable)
4966 check_final_value (loop, v);
4967 }
4968 }
4969
4970 /* Try to generate the simplest rtx for the expression
4971 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4972 value of giv's. */
4973
4974 static rtx
4975 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4976 {
4977 rtx temp, mult_res;
4978 rtx result;
4979
4980 /* The modes must all be the same. This should always be true. For now,
4981 check to make sure. */
4982 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4983 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4984 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4985
4986 /* Ensure that if at least one of mult1/mult2 is constant, then mult2
4987 will be a constant. */
4988 if (GET_CODE (mult1) == CONST_INT)
4989 {
4990 temp = mult2;
4991 mult2 = mult1;
4992 mult1 = temp;
4993 }
4994
4995 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4996 if (! mult_res)
4997 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4998
4999 /* Again, put the constant second. */
5000 if (GET_CODE (add1) == CONST_INT)
5001 {
5002 temp = add1;
5003 add1 = mult_res;
5004 mult_res = temp;
5005 }
5006
5007 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5008 if (! result)
5009 result = gen_rtx_PLUS (mode, add1, mult_res);
5010
5011 return result;
5012 }
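/* Worked example (illustrative): fold_rtx_mult_add (GEN_INT (3), GEN_INT (4),
   GEN_INT (5), SImode) simplifies (3 * 4) + 5 down to (const_int 17); with a
   non-constant operand such as a pseudo register the result stays as symbolic
   MULT/PLUS rtl with any constant placed second.  */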
5013
5014 /* Searches the list of induction structs for the biv BL, to try to calculate
5015 the total increment value for one iteration of the loop as a constant.
5016
5017 Returns the increment value as an rtx, simplified as much as possible,
5018 if it can be calculated. Otherwise, returns 0. */
5019
5020 static rtx
5021 biv_total_increment (const struct iv_class *bl)
5022 {
5023 struct induction *v;
5024 rtx result;
5025
5026 /* For the increment, we must check every instruction that sets it. Each
5027 instruction must be executed only once each time through the loop.
5028 To verify this, we check that the insn is always executed, and that
5029 there are no backward branches after the insn that branch to before it.
5030 Also, the insn must have a mult_val of one (to make sure it really is
5031 an increment). */
5032
5033 result = const0_rtx;
5034 for (v = bl->biv; v; v = v->next_iv)
5035 {
5036 if (v->always_computable && v->mult_val == const1_rtx
5037 && ! v->maybe_multiple
5038 && SCALAR_INT_MODE_P (v->mode))
5039 {
5040 /* If we have already counted it, skip it. */
5041 if (v->same)
5042 continue;
5043
5044 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5045 }
5046 else
5047 return 0;
5048 }
5049
5050 return result;
5051 }
5052
5053 /* Try to prove that the register is dead after the loop exits. Trace every
5054 loop exit looking for an insn that will always be executed, which sets
5055 the register to some value, and appears before the first use of the register
5056 is found. If successful, then return 1, otherwise return 0. */
5057
5058 /* ?? Could be made more intelligent in the handling of jumps, so that
5059 it can search past if statements and other similar structures. */
5060
5061 static int
5062 reg_dead_after_loop (const struct loop *loop, rtx reg)
5063 {
5064 rtx insn, label;
5065 int jump_count = 0;
5066 int label_count = 0;
5067
5068 /* In addition to checking all exits of this loop, we must also check
5069 all exits of inner nested loops that would exit this loop. We don't
5070 have any way to identify those, so we just give up if there are any
5071 such inner loop exits. */
5072
5073 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5074 label_count++;
5075
5076 if (label_count != loop->exit_count)
5077 return 0;
5078
5079 /* HACK: Must also search the loop fall through exit, create a label_ref
5080 here which points to the loop->end, and append the loop_number_exit_labels
5081 list to it. */
5082 label = gen_rtx_LABEL_REF (Pmode, loop->end);
5083 LABEL_NEXTREF (label) = loop->exit_labels;
5084
5085 for (; label; label = LABEL_NEXTREF (label))
5086 {
5087 /* Succeed if we find an insn which sets the biv or if we reach the end
5088 of the function. Fail if we find an insn that uses the biv, or if we
5089 come to a conditional jump. */
5090
5091 insn = NEXT_INSN (XEXP (label, 0));
5092 while (insn)
5093 {
5094 if (INSN_P (insn))
5095 {
5096 rtx set, note;
5097
5098 if (reg_referenced_p (reg, PATTERN (insn)))
5099 return 0;
5100
5101 note = find_reg_equal_equiv_note (insn);
5102 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5103 return 0;
5104
5105 set = single_set (insn);
5106 if (set && rtx_equal_p (SET_DEST (set), reg))
5107 break;
5108
5109 if (JUMP_P (insn))
5110 {
5111 if (GET_CODE (PATTERN (insn)) == RETURN)
5112 break;
5113 else if (!any_uncondjump_p (insn)
5114 /* Prevent infinite loop following infinite loops. */
5115 || jump_count++ > 20)
5116 return 0;
5117 else
5118 insn = JUMP_LABEL (insn);
5119 }
5120 }
5121
5122 insn = NEXT_INSN (insn);
5123 }
5124 }
5125
5126 /* Success, the register is dead on all loop exits. */
5127 return 1;
5128 }
5129
5130 /* Try to calculate the final value of the biv, the value it will have at
5131 the end of the loop. If we can do it, return that value. */
5132
5133 static rtx
5134 final_biv_value (const struct loop *loop, struct iv_class *bl)
5135 {
5136 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5137 rtx increment, tem;
5138
5139 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5140
5141 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5142 return 0;
5143
5144 /* The final value for reversed bivs must be calculated differently than
5145 for ordinary bivs. In this case, there is already an insn after the
5146 loop which sets this biv's final value (if necessary), and there are
5147 no other loop exits, so we can return any value. */
5148 if (bl->reversed)
5149 {
5150 if (loop_dump_stream)
5151 fprintf (loop_dump_stream,
5152 "Final biv value for %d, reversed biv.\n", bl->regno);
5153
5154 return const0_rtx;
5155 }
5156
5157 /* Try to calculate the final value as initial value + (number of iterations
5158 * increment). For this to work, increment must be invariant, the only
5159 exit from the loop must be the fall through at the bottom (otherwise
5160 it may not have its final value when the loop exits), and the initial
5161 value of the biv must be invariant. */
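/* Worked example (illustrative): a biv initialized to 0 and incremented
   by 4 on each iteration of a loop known to run 10 times has the final
   value 0 + 10 * 4 = 40.  */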
5162
5163 if (n_iterations != 0
5164 && ! loop->exit_count
5165 && loop_invariant_p (loop, bl->initial_value))
5166 {
5167 increment = biv_total_increment (bl);
5168
5169 if (increment && loop_invariant_p (loop, increment))
5170 {
5171 /* Can calculate the loop exit value, emit insns after loop
5172 end to calculate this value into a temporary register in
5173 case it is needed later. */
5174
5175 tem = gen_reg_rtx (bl->biv->mode);
5176 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5177 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5178 bl->initial_value, tem);
5179
5180 if (loop_dump_stream)
5181 fprintf (loop_dump_stream,
5182 "Final biv value for %d, calculated.\n", bl->regno);
5183
5184 return tem;
5185 }
5186 }
5187
5188 /* Check to see if the biv is dead at all loop exits. */
5189 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5190 {
5191 if (loop_dump_stream)
5192 fprintf (loop_dump_stream,
5193 "Final biv value for %d, biv dead after loop exit.\n",
5194 bl->regno);
5195
5196 return const0_rtx;
5197 }
5198
5199 return 0;
5200 }
5201
5202 /* Return nonzero if it is possible to eliminate the biv BL provided
5203 all givs are reduced. This is possible if either the reg is not
5204 used outside the loop, or we can compute what its final value will
5205 be. */
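/* Illustrative example: in `for (i = 0; i < n; i++) a[i] = 0;', once the
   address giv `&a[i]' is strength-reduced to a pointer bumped by
   sizeof (*a) each iteration, the biv `i' is used only by the exit test;
   if `i' is dead after the loop (or its final value `n' is computable),
   the test can be rewritten against the pointer and the biv eliminated.  */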
5206
5207 static int
5208 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5209 int threshold, int insn_count)
5210 {
5211 /* For architectures with a decrement_and_branch_until_zero insn,
5212 don't do this if we put a REG_NONNEG note on the endtest for this
5213 biv. */
5214
5215 #ifdef HAVE_decrement_and_branch_until_zero
5216 if (bl->nonneg)
5217 {
5218 if (loop_dump_stream)
5219 fprintf (loop_dump_stream,
5220 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5221 return 0;
5222 }
5223 #endif
5224
5225 /* Check that the biv is not used outside the loop, or that it has a final value.
5226 Compare against bl->init_insn rather than loop->start. We aren't
5227 concerned with any uses of the biv between init_insn and
5228 loop->start since these won't be affected by the value of the biv
5229 elsewhere in the function, so long as init_insn doesn't use the
5230 biv itself. */
5231
5232 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5233 && bl->init_insn
5234 && INSN_UID (bl->init_insn) < max_uid_for_loop
5235 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5236 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5237 || (bl->final_value = final_biv_value (loop, bl)))
5238 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5239
5240 if (loop_dump_stream)
5241 {
5242 fprintf (loop_dump_stream,
5243 "Cannot eliminate biv %d.\n",
5244 bl->regno);
5245 fprintf (loop_dump_stream,
5246 "First use: insn %d, last use: insn %d.\n",
5247 REGNO_FIRST_UID (bl->regno),
5248 REGNO_LAST_UID (bl->regno));
5249 }
5250 return 0;
5251 }
5252
5253
5254 /* Reduce each giv of BL that we have decided to reduce. */
5255
5256 static void
5257 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5258 {
5259 struct induction *v;
5260
5261 for (v = bl->giv; v; v = v->next_iv)
5262 {
5263 struct induction *tv;
5264 if (! v->ignore && v->same == 0)
5265 {
5266 int auto_inc_opt = 0;
5267
5268 /* If the code for derived givs immediately below has already
5269 allocated a new_reg, we must keep it. */
5270 if (! v->new_reg)
5271 v->new_reg = gen_reg_rtx (v->mode);
5272
5273 #ifdef AUTO_INC_DEC
5274 /* If the target has auto-increment addressing modes, and
5275 this is an address giv, then try to put the increment
5276 immediately after its use, so that flow can create an
5277 auto-increment addressing mode. */
5278 /* Don't do this for loops entered at the bottom, to avoid
5279 this invalid transformation:
5280 jmp L; -> jmp L;
5281 TOP: TOP:
5282 use giv use giv
5283 L: inc giv
5284 inc biv L:
5285 test biv test giv
5286 cbr TOP cbr TOP
5287 */
5288 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5289 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5290 /* We don't handle reversed biv's because bl->biv->insn
5291 does not have a valid INSN_LUID. */
5292 && ! bl->reversed
5293 && v->always_executed && ! v->maybe_multiple
5294 && INSN_UID (v->insn) < max_uid_for_loop
5295 && !loop->top)
5296 {
5297 /* If other giv's have been combined with this one, then
5298 this will work only if all uses of the other giv's occur
5299 before this giv's insn. This is difficult to check.
5300
5301 We simplify this by looking for the common case where
5302 there is one DEST_REG giv, and this giv's insn is the
5303 last use of the dest_reg of that DEST_REG giv. If the
5304 increment occurs after the address giv, then we can
5305 perform the optimization. (Otherwise, the increment
5306 would have to go before other_giv, and we would not be
5307 able to combine it with the address giv to get an
5308 auto-inc address.) */
5309 if (v->combined_with)
5310 {
5311 struct induction *other_giv = 0;
5312
5313 for (tv = bl->giv; tv; tv = tv->next_iv)
5314 if (tv->same == v)
5315 {
5316 if (other_giv)
5317 break;
5318 else
5319 other_giv = tv;
5320 }
5321 if (! tv && other_giv
5322 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5323 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5324 == INSN_UID (v->insn))
5325 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5326 auto_inc_opt = 1;
5327 }
5328 /* Check for case where increment is before the address
5329 giv. Do this test in "loop order". */
5330 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5331 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5332 || (INSN_LUID (bl->biv->insn)
5333 > INSN_LUID (loop->scan_start))))
5334 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5335 && (INSN_LUID (loop->scan_start)
5336 < INSN_LUID (bl->biv->insn))))
5337 auto_inc_opt = -1;
5338 else
5339 auto_inc_opt = 1;
5340
5341 #ifdef HAVE_cc0
5342 {
5343 rtx prev;
5344
5345 /* We can't put an insn immediately after one setting
5346 cc0, or immediately before one using cc0. */
5347 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5348 || (auto_inc_opt == -1
5349 && (prev = prev_nonnote_insn (v->insn)) != 0
5350 && INSN_P (prev)
5351 && sets_cc0_p (PATTERN (prev))))
5352 auto_inc_opt = 0;
5353 }
5354 #endif
5355
5356 if (auto_inc_opt)
5357 v->auto_inc_opt = 1;
5358 }
5359 #endif
5360
5361 /* For each place where the biv is incremented, add an insn
5362 to increment the new, reduced reg for the giv. */
5363 for (tv = bl->biv; tv; tv = tv->next_iv)
5364 {
5365 rtx insert_before;
5366
5367 /* Skip if location is the same as a previous one. */
5368 if (tv->same)
5369 continue;
5370 if (! auto_inc_opt)
5371 insert_before = NEXT_INSN (tv->insn);
5372 else if (auto_inc_opt == 1)
5373 insert_before = NEXT_INSN (v->insn);
5374 else
5375 insert_before = v->insn;
5376
5377 if (tv->mult_val == const1_rtx)
5378 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5379 v->new_reg, v->new_reg,
5380 0, insert_before);
5381 else /* tv->mult_val == const0_rtx */
5382 /* A multiply is acceptable here
5383 since this is presumed to be seldom executed. */
5384 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5385 v->add_val, v->new_reg,
5386 0, insert_before);
5387 }
5388
5389 /* Add code at loop start to initialize giv's reduced reg. */
5390
5391 loop_iv_add_mult_hoist (loop,
5392 extend_value_for_giv (v, bl->initial_value),
5393 v->mult_val, v->add_val, v->new_reg);
5394 }
5395 }
5396 }
5397
5398
5399 /* Check for givs whose first use is their definition and whose
5400 last use is the definition of another giv. If so, it is likely
5401 dead and should not be used to derive another giv nor to
5402 eliminate a biv. */
5403
5404 static void
5405 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5406 {
5407 struct induction *v;
5408
5409 for (v = bl->giv; v; v = v->next_iv)
5410 {
5411 if (v->ignore
5412 || (v->same && v->same->ignore))
5413 continue;
5414
5415 if (v->giv_type == DEST_REG
5416 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5417 {
5418 struct induction *v1;
5419
5420 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5421 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5422 v->maybe_dead = 1;
5423 }
5424 }
5425 }
5426
5427
5428 static void
5429 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5430 {
5431 struct induction *v;
5432
5433 for (v = bl->giv; v; v = v->next_iv)
5434 {
5435 if (v->same && v->same->ignore)
5436 v->ignore = 1;
5437
5438 if (v->ignore)
5439 continue;
5440
5441 /* Update expression if this was combined, in case other giv was
5442 replaced. */
5443 if (v->same)
5444 v->new_reg = replace_rtx (v->new_reg,
5445 v->same->dest_reg, v->same->new_reg);
5446
5447 /* See if this register is known to be a pointer to something. If
5448 so, see if we can find the alignment. First see if there is a
5449 destination register that is a pointer. If so, this shares the
5450 alignment too. Next see if we can deduce anything from the
5451 computational information. If not, and this is a DEST_ADDR
5452 giv, at least we know that it's a pointer, though we don't know
5453 the alignment. */
5454 if (REG_P (v->new_reg)
5455 && v->giv_type == DEST_REG
5456 && REG_POINTER (v->dest_reg))
5457 mark_reg_pointer (v->new_reg,
5458 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5459 else if (REG_P (v->new_reg)
5460 && REG_POINTER (v->src_reg))
5461 {
5462 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5463
5464 if (align == 0
5465 || GET_CODE (v->add_val) != CONST_INT
5466 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5467 align = 0;
5468
5469 mark_reg_pointer (v->new_reg, align);
5470 }
5471 else if (REG_P (v->new_reg)
5472 && REG_P (v->add_val)
5473 && REG_POINTER (v->add_val))
5474 {
5475 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5476
5477 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5478 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5479 align = 0;
5480
5481 mark_reg_pointer (v->new_reg, align);
5482 }
5483 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5484 mark_reg_pointer (v->new_reg, 0);
5485
5486 if (v->giv_type == DEST_ADDR)
5487 {
5488 /* Store reduced reg as the address in the memref where we found
5489 this giv. */
5490 if (validate_change_maybe_volatile (v->insn, v->location,
5491 v->new_reg))
5492 /* Yay, it worked! */;
5493 /* Not replaceable; emit an insn to set the original
5494 giv reg from the reduced giv. */
5495 else if (REG_P (*v->location))
5496 loop_insn_emit_before (loop, 0, v->insn,
5497 gen_move_insn (*v->location,
5498 v->new_reg));
5499 else if (GET_CODE (*v->location) == PLUS
5500 && REG_P (XEXP (*v->location, 0))
5501 && CONSTANT_P (XEXP (*v->location, 1)))
5502 {
5503 rtx tem;
5504 start_sequence ();
5505 tem = expand_simple_binop (GET_MODE (*v->location), MINUS,
5506 v->new_reg, XEXP (*v->location, 1),
5507 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5508 emit_move_insn (XEXP (*v->location, 0), tem);
5509 tem = get_insns ();
5510 end_sequence ();
5511 loop_insn_emit_before (loop, 0, v->insn, tem);
5512 }
5513 else
5514 {
5515 /* If it wasn't a reg, create a pseudo and use that. */
5516 rtx reg, seq;
5517 start_sequence ();
5518 reg = force_reg (v->mode, *v->location);
5519 if (validate_change_maybe_volatile (v->insn, v->location, reg))
5520 {
5521 seq = get_insns ();
5522 end_sequence ();
5523 loop_insn_emit_before (loop, 0, v->insn, seq);
5524 }
5525 else
5526 {
5527 end_sequence ();
5528 if (loop_dump_stream)
5529 fprintf (loop_dump_stream,
5530 "unable to reduce iv in insn %d\n",
5531 INSN_UID (v->insn));
5532 bl->all_reduced = 0;
5533 v->ignore = 1;
5534 continue;
5535 }
5536 }
5537 }
5538 else if (v->replaceable)
5539 {
5540 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5541 }
5542 else
5543 {
5544 rtx original_insn = v->insn;
5545 rtx note;
5546
5547 /* Not replaceable; emit an insn to set the original giv reg from
5548 the reduced giv, same as above. */
5549 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5550 gen_move_insn (v->dest_reg,
5551 v->new_reg));
5552
5553 /* The original insn may have a REG_EQUAL note. This note is
5554 now incorrect and may result in invalid substitutions later.
5555 The original insn is dead, but may be part of a libcall
5556 sequence, which doesn't seem worth the bother of handling. */
5557 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5558 if (note)
5559 remove_note (original_insn, note);
5560 }
5561
5562 /* When a loop is reversed, givs which depend on the reversed
5563 biv, and which are live outside the loop, must be set to their
5564 correct final value. This insn is only needed if the giv is
5565 not replaceable. The correct final value is the same as the
5566 value that the giv starts the reversed loop with. */
5567 if (bl->reversed && ! v->replaceable)
5568 loop_iv_add_mult_sink (loop,
5569 extend_value_for_giv (v, bl->initial_value),
5570 v->mult_val, v->add_val, v->dest_reg);
5571 else if (v->final_value)
5572 loop_insn_sink_or_swim (loop,
5573 gen_load_of_final_value (v->dest_reg,
5574 v->final_value));
5575
5576 if (loop_dump_stream)
5577 {
5578 fprintf (loop_dump_stream, "giv at %d reduced to ",
5579 INSN_UID (v->insn));
5580 print_simple_rtl (loop_dump_stream, v->new_reg);
5581 fprintf (loop_dump_stream, "\n");
5582 }
5583 }
5584 }
5585
5586
5587 static int
5588 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5589 struct iv_class *bl, struct induction *v,
5590 rtx test_reg)
5591 {
5592 int add_cost;
5593 int benefit;
5594
5595 benefit = v->benefit;
5596 PUT_MODE (test_reg, v->mode);
5597 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5598 test_reg, test_reg);
5599
5600 /* Reduce benefit if not replaceable, since we will insert a
5601 move-insn to replace the insn that calculates this giv. Don't do
5602 this unless the giv is a user variable, since it will often be
5603 marked non-replaceable because of the duplication of the exit
5604 code outside the loop. In such a case, the copies we insert are
5605 dead and will be deleted. So they don't have a cost. Similar
5606 situations exist. */
5607 /* ??? The new final_[bg]iv_value code does a much better job of
5608 finding replaceable giv's, and hence this code may no longer be
5609 necessary. */
5610 if (! v->replaceable && ! bl->eliminable
5611 && REG_USERVAR_P (v->dest_reg))
5612 benefit -= copy_cost;
5613
5614 /* Decrease the benefit to count the add-insns that we will insert
5615 to increment the reduced reg for the giv. ??? This can
5616 overestimate the run-time cost of the additional insns, e.g. if
5617 there are multiple basic blocks that increment the biv, but only
5618 one of these blocks is executed during each iteration. There is
5619 no good way to detect cases like this with the current structure
5620 of the loop optimizer. This code is more accurate for
5621 determining code size than run-time benefits. */
5622 benefit -= add_cost * bl->biv_count;
5623
5624 /* Decide whether to strength-reduce this giv or to leave the code
5625 unchanged (recompute it from the biv each time it is used). This
5626 decision can be made independently for each giv. */
5627
5628 #ifdef AUTO_INC_DEC
5629 /* Attempt to guess whether autoincrement will handle some of the
5630 new add insns; if so, increase BENEFIT (undo the subtraction of
5631 add_cost that was done above). */
5632 if (v->giv_type == DEST_ADDR
5633 /* Increasing the benefit is risky, since this is only a guess.
5634 Avoid increasing register pressure in cases where there would
5635 be no other benefit from reducing this giv. */
5636 && benefit > 0
5637 && GET_CODE (v->mult_val) == CONST_INT)
5638 {
5639 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5640
5641 if (HAVE_POST_INCREMENT
5642 && INTVAL (v->mult_val) == size)
5643 benefit += add_cost * bl->biv_count;
5644 else if (HAVE_PRE_INCREMENT
5645 && INTVAL (v->mult_val) == size)
5646 benefit += add_cost * bl->biv_count;
5647 else if (HAVE_POST_DECREMENT
5648 && -INTVAL (v->mult_val) == size)
5649 benefit += add_cost * bl->biv_count;
5650 else if (HAVE_PRE_DECREMENT
5651 && -INTVAL (v->mult_val) == size)
5652 benefit += add_cost * bl->biv_count;
5653 }
5654 #endif
5655
5656 return benefit;
5657 }
5658
5659
5660 /* Free IV structures for LOOP. */
5661
5662 static void
5663 loop_ivs_free (struct loop *loop)
5664 {
5665 struct loop_ivs *ivs = LOOP_IVS (loop);
5666 struct iv_class *iv = ivs->list;
5667
5668 free (ivs->regs);
5669
5670 while (iv)
5671 {
5672 struct iv_class *next = iv->next;
5673 struct induction *induction;
5674 struct induction *next_induction;
5675
5676 for (induction = iv->biv; induction; induction = next_induction)
5677 {
5678 next_induction = induction->next_iv;
5679 free (induction);
5680 }
5681 for (induction = iv->giv; induction; induction = next_induction)
5682 {
5683 next_induction = induction->next_iv;
5684 free (induction);
5685 }
5686
5687 free (iv);
5688 iv = next;
5689 }
5690 }
5691
5692 /* Look back before LOOP->START for the insn that sets REG and return
5693 the equivalent constant if there is a REG_EQUAL note otherwise just
5694 the SET_SRC of REG. */
5695
5696 static rtx
5697 loop_find_equiv_value (const struct loop *loop, rtx reg)
5698 {
5699 rtx loop_start = loop->start;
5700 rtx insn, set;
5701 rtx ret;
5702
5703 ret = reg;
5704 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5705 {
5706 if (LABEL_P (insn))
5707 break;
5708
5709 else if (INSN_P (insn) && reg_set_p (reg, insn))
5710 {
5711 /* We found the last insn before the loop that sets the register.
5712 If it sets the entire register, and has a REG_EQUAL note,
5713 then use the value of the REG_EQUAL note. */
5714 if ((set = single_set (insn))
5715 && (SET_DEST (set) == reg))
5716 {
5717 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5718
5719 /* Only use the REG_EQUAL note if it is a constant.
5720 Other things, divide in particular, will cause
5721 problems later if we use them. */
5722 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5723 && CONSTANT_P (XEXP (note, 0)))
5724 ret = XEXP (note, 0);
5725 else
5726 ret = SET_SRC (set);
5727
5728 /* We cannot do this if it changes between the
5729 assignment and loop start though. */
5730 if (modified_between_p (ret, insn, loop_start))
5731 ret = reg;
5732 }
5733 break;
5734 }
5735 }
5736 return ret;
5737 }
5738
5739 /* Find and return register term common to both expressions OP0 and
5740 OP1 or NULL_RTX if no such term exists. Each expression must be a
5741 REG or a PLUS of a REG. */
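/* E.g. (illustrative) with OP0 == (plus (reg 100) (const_int 4)) and
   OP1 == (reg 100) the common term is (reg 100); for OP0 == (reg 100)
   and OP1 == (reg 101) there is none and NULL_RTX is returned.  */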
5742
5743 static rtx
5744 find_common_reg_term (rtx op0, rtx op1)
5745 {
5746 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5747 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5748 {
5749 rtx op00;
5750 rtx op01;
5751 rtx op10;
5752 rtx op11;
5753
5754 if (GET_CODE (op0) == PLUS)
5755 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5756 else
5757 op01 = const0_rtx, op00 = op0;
5758
5759 if (GET_CODE (op1) == PLUS)
5760 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5761 else
5762 op11 = const0_rtx, op10 = op1;
5763
5764 /* Find and return common register term if present. */
5765 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5766 return op00;
5767 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5768 return op01;
5769 }
5770
5771 /* No common register term found. */
5772 return NULL_RTX;
5773 }
5774
5775 /* Determine the loop iterator and calculate the number of loop
5776 iterations. Returns the exact number of loop iterations if it can
5777 be calculated, otherwise returns zero. */
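/* A hypothetical example, assuming the biv and its constant increment
   are recognized: for

	for (i = 0; i < 10; i += 2)
	  body;

   the initial value is 0, the increment 2 and the adjusted final value
   10, so the routine returns (10 - 0) / 2 == 5.  */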
5778
5779 static unsigned HOST_WIDE_INT
5780 loop_iterations (struct loop *loop)
5781 {
5782 struct loop_info *loop_info = LOOP_INFO (loop);
5783 struct loop_ivs *ivs = LOOP_IVS (loop);
5784 rtx comparison, comparison_value;
5785 rtx iteration_var, initial_value, increment, final_value;
5786 enum rtx_code comparison_code;
5787 HOST_WIDE_INT inc;
5788 unsigned HOST_WIDE_INT abs_inc;
5789 unsigned HOST_WIDE_INT abs_diff;
5790 int off_by_one;
5791 int increment_dir;
5792 int unsigned_p, compare_dir, final_larger;
5793 rtx last_loop_insn;
5794 struct iv_class *bl;
5795
5796 loop_info->n_iterations = 0;
5797 loop_info->initial_value = 0;
5798 loop_info->initial_equiv_value = 0;
5799 loop_info->comparison_value = 0;
5800 loop_info->final_value = 0;
5801 loop_info->final_equiv_value = 0;
5802 loop_info->increment = 0;
5803 loop_info->iteration_var = 0;
5804 loop_info->iv = 0;
5805
5806 /* We used to use prev_nonnote_insn here, but that fails because it might
5807 accidentally get the branch for a contained loop if the branch for this
5808 loop was deleted. We can only trust branches immediately before the
5809 loop_end. */
5810 last_loop_insn = PREV_INSN (loop->end);
5811
5812 /* ??? We should probably try harder to find the jump insn
5813 at the end of the loop. The following code assumes that
5814 the last loop insn is a jump to the top of the loop. */
5815 if (!JUMP_P (last_loop_insn))
5816 {
5817 if (loop_dump_stream)
5818 fprintf (loop_dump_stream,
5819 "Loop iterations: No final conditional branch found.\n");
5820 return 0;
5821 }
5822
5823 /* If there is more than a single jump to the top of the loop
5824 we cannot (easily) determine the iteration count. */
5825 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5826 {
5827 if (loop_dump_stream)
5828 fprintf (loop_dump_stream,
5829 "Loop iterations: Loop has multiple back edges.\n");
5830 return 0;
5831 }
5832
5833 /* Find the iteration variable. If the last insn is a conditional
5834 branch, and the insn before tests a register value, make that the
5835 iteration variable. */
5836
5837 comparison = get_condition_for_loop (loop, last_loop_insn);
5838 if (comparison == 0)
5839 {
5840 if (loop_dump_stream)
5841 fprintf (loop_dump_stream,
5842 "Loop iterations: No final comparison found.\n");
5843 return 0;
5844 }
5845
5846 /* ??? Get_condition may switch position of induction variable and
5847 invariant register when it canonicalizes the comparison. */
5848
5849 comparison_code = GET_CODE (comparison);
5850 iteration_var = XEXP (comparison, 0);
5851 comparison_value = XEXP (comparison, 1);
5852
5853 if (!REG_P (iteration_var))
5854 {
5855 if (loop_dump_stream)
5856 fprintf (loop_dump_stream,
5857 "Loop iterations: Comparison not against register.\n");
5858 return 0;
5859 }
5860
5861 /* The only new registers that are created before loop iterations
5862 are givs made from biv increments or registers created by
5863 load_mems. In the latter case, it is possible that try_copy_prop
5864 will propagate a new pseudo into the old iteration register but
5865 this will be marked by having the REG_USERVAR_P bit set. */
5866
5867 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5868 || REG_USERVAR_P (iteration_var));
5869
5870 /* Determine the initial value of the iteration variable, and the amount
5871 that it is incremented each loop. Use the tables constructed by
5872 the strength reduction pass to calculate these values. */
5873
5874 /* Clear the result values, in case no answer can be found. */
5875 initial_value = 0;
5876 increment = 0;
5877
5878 /* The iteration variable can be either a giv or a biv. Check to see
5879 which it is, and compute the variable's initial value, and increment
5880 value if possible. */
5881
5882 /* If this is a new register, can't handle it since we don't have any
5883 reg_iv_type entry for it. */
5884 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5885 {
5886 if (loop_dump_stream)
5887 fprintf (loop_dump_stream,
5888 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5889 return 0;
5890 }
5891
5892 /* Reject iteration variables larger than the host wide int size, since they
5893 could result in a number of iterations greater than the range of our
5894 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5895 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5896 > HOST_BITS_PER_WIDE_INT))
5897 {
5898 if (loop_dump_stream)
5899 fprintf (loop_dump_stream,
5900 "Loop iterations: Iteration var rejected because mode too large.\n");
5901 return 0;
5902 }
5903 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5904 {
5905 if (loop_dump_stream)
5906 fprintf (loop_dump_stream,
5907 "Loop iterations: Iteration var not an integer.\n");
5908 return 0;
5909 }
5910
5911 /* Try swapping the comparison to identify a suitable iv. */
5912 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5913 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5914 && REG_P (comparison_value)
5915 && REGNO (comparison_value) < ivs->n_regs)
5916 {
5917 rtx temp = comparison_value;
5918 comparison_code = swap_condition (comparison_code);
5919 comparison_value = iteration_var;
5920 iteration_var = temp;
5921 }
5922
5923 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5924 {
5925 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5926
5927 /* Grab initial value, only useful if it is a constant. */
5928 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5929 initial_value = bl->initial_value;
5930 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5931 {
5932 if (loop_dump_stream)
5933 fprintf (loop_dump_stream,
5934 "Loop iterations: Basic induction var not set once in each iteration.\n");
5935 return 0;
5936 }
5937
5938 increment = biv_total_increment (bl);
5939 }
5940 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5941 {
5942 HOST_WIDE_INT offset = 0;
5943 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5944 rtx biv_initial_value;
5945
5946 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5947
5948 if (!v->always_executed || v->maybe_multiple)
5949 {
5950 if (loop_dump_stream)
5951 fprintf (loop_dump_stream,
5952 "Loop iterations: General induction var not set once in each iteration.\n");
5953 return 0;
5954 }
5955
5956 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5957
5958 /* Increment value is mult_val times the increment value of the biv. */
5959
5960 increment = biv_total_increment (bl);
5961 if (increment)
5962 {
5963 struct induction *biv_inc;
5964
5965 increment = fold_rtx_mult_add (v->mult_val,
5966 extend_value_for_giv (v, increment),
5967 const0_rtx, v->mode);
5968 /* The caller assumes that one full increment has occurred at the
5969 first loop test. But that's not true when the biv is incremented
5970 after the giv is set (which is the usual case), e.g.:
5971 i = 6; do {;} while (i++ < 9) .
5972 Therefore, we bias the initial value by subtracting the amount of
5973 the increment that occurs between the giv set and the giv test. */
5974 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5975 {
5976 if (loop_insn_first_p (v->insn, biv_inc->insn))
5977 {
5978 if (REG_P (biv_inc->add_val))
5979 {
5980 if (loop_dump_stream)
5981 fprintf (loop_dump_stream,
5982 "Loop iterations: Basic induction var add_val is REG %d.\n",
5983 REGNO (biv_inc->add_val));
5984 return 0;
5985 }
5986
5987 /* If we have already counted it, skip it. */
5988 if (biv_inc->same)
5989 continue;
5990
5991 offset -= INTVAL (biv_inc->add_val);
5992 }
5993 }
5994 }
5995 if (loop_dump_stream)
5996 fprintf (loop_dump_stream,
5997 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5998 (long) offset);
5999
6000 /* Initial value is mult_val times the biv's initial value plus
6001 add_val. Only useful if it is a constant. */
6002 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
6003 initial_value
6004 = fold_rtx_mult_add (v->mult_val,
6005 plus_constant (biv_initial_value, offset),
6006 v->add_val, v->mode);
6007 }
6008 else
6009 {
6010 if (loop_dump_stream)
6011 fprintf (loop_dump_stream,
6012 "Loop iterations: Not basic or general induction var.\n");
6013 return 0;
6014 }
6015
6016 if (initial_value == 0)
6017 return 0;
6018
6019 unsigned_p = 0;
6020 off_by_one = 0;
6021 switch (comparison_code)
6022 {
6023 case LEU:
6024 unsigned_p = 1;
6025 case LE:
6026 compare_dir = 1;
6027 off_by_one = 1;
6028 break;
6029 case GEU:
6030 unsigned_p = 1;
6031 case GE:
6032 compare_dir = -1;
6033 off_by_one = -1;
6034 break;
6035 case EQ:
6036 /* Cannot determine loop iterations with this case. */
6037 compare_dir = 0;
6038 break;
6039 case LTU:
6040 unsigned_p = 1;
6041 case LT:
6042 compare_dir = 1;
6043 break;
6044 case GTU:
6045 unsigned_p = 1;
6046 case GT:
6047 compare_dir = -1;
6048 break;
6049 case NE:
6050 compare_dir = 0;
6051 break;
6052 default:
6053 gcc_unreachable ();
6054 }
6055
6056 /* If the comparison value is an invariant register, then try to find
6057 its value from the insns before the start of the loop. */
6058
6059 final_value = comparison_value;
6060 if (REG_P (comparison_value)
6061 && loop_invariant_p (loop, comparison_value))
6062 {
6063 final_value = loop_find_equiv_value (loop, comparison_value);
6064
6065 /* If we don't get an invariant final value, we are better
6066 off with the original register. */
6067 if (! loop_invariant_p (loop, final_value))
6068 final_value = comparison_value;
6069 }
6070
6071 /* Calculate the approximate final value of the induction variable
6072 (on the last successful iteration). The exact final value
6073 depends on the branch operator, and increment sign. It will be
6074 wrong if the iteration variable is not incremented by one each
6075 time through the loop and (comparison_value + off_by_one -
6076 initial_value) % increment != 0.
6077 ??? Note that the final_value may overflow and thus final_larger
6078 will be bogus. A potentially infinite loop will be classified
6079 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
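  /* E.g. (illustrative) for `for (i = 0; i <= 9; i++)' the comparison is
     LE, so off_by_one is 1 and the approximate final value becomes
     9 + 1 == 10, which gives the expected (10 - 0) / 1 == 10 iterations
     in the calculation further below.  */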
6080 if (off_by_one)
6081 final_value = plus_constant (final_value, off_by_one);
6082
6083 /* Save the calculated values describing this loop's bounds, in case
6084 precondition_loop_p will need them later. These values cannot be
6085 recalculated inside precondition_loop_p because strength reduction
6086 optimizations may obscure the loop's structure.
6087
6088 These values are only required by precondition_loop_p and insert_bct
6089 whenever the number of iterations cannot be computed at compile time.
6090 Only the difference between final_value and initial_value is
6091 important. Note that final_value is only approximate. */
6092 loop_info->initial_value = initial_value;
6093 loop_info->comparison_value = comparison_value;
6094 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6095 loop_info->increment = increment;
6096 loop_info->iteration_var = iteration_var;
6097 loop_info->comparison_code = comparison_code;
6098 loop_info->iv = bl;
6099
6100 /* Try to determine the iteration count for loops such
6101 as (for i = init; i < init + const; i++). When running the
6102 loop optimization twice, the first pass often converts simple
6103 loops into this form. */
6104
6105 if (REG_P (initial_value))
6106 {
6107 rtx reg1;
6108 rtx reg2;
6109 rtx const2;
6110
6111 reg1 = initial_value;
6112 if (GET_CODE (final_value) == PLUS)
6113 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6114 else
6115 reg2 = final_value, const2 = const0_rtx;
6116
6117 /* Check for initial_value = reg1, final_value = reg2 + const2,
6118 where reg1 != reg2. */
6119 if (REG_P (reg2) && reg2 != reg1)
6120 {
6121 rtx temp;
6122
6123 /* Find what reg1 is equivalent to. Hopefully it will
6124 either be reg2 or reg2 plus a constant. */
6125 temp = loop_find_equiv_value (loop, reg1);
6126
6127 if (find_common_reg_term (temp, reg2))
6128 initial_value = temp;
6129 else if (loop_invariant_p (loop, reg2))
6130 {
6131 /* Find what reg2 is equivalent to. Hopefully it will
6132 either be reg1 or reg1 plus a constant. Let's ignore
6133 the latter case for now since it is not so common. */
6134 temp = loop_find_equiv_value (loop, reg2);
6135
6136 if (temp == loop_info->iteration_var)
6137 temp = initial_value;
6138 if (temp == reg1)
6139 final_value = (const2 == const0_rtx)
6140 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6141 }
6142 }
6143 }
6144
6145 loop_info->initial_equiv_value = initial_value;
6146 loop_info->final_equiv_value = final_value;
6147
6148 /* For EQ comparison loops, we don't have a valid final value.
6149 Check this now so that we won't leave an invalid value if we
6150 return early for any other reason. */
6151 if (comparison_code == EQ)
6152 loop_info->final_equiv_value = loop_info->final_value = 0;
6153
6154 if (increment == 0)
6155 {
6156 if (loop_dump_stream)
6157 fprintf (loop_dump_stream,
6158 "Loop iterations: Increment value can't be calculated.\n");
6159 return 0;
6160 }
6161
6162 if (GET_CODE (increment) != CONST_INT)
6163 {
6164 /* If we have a REG, check to see if REG holds a constant value. */
6165 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6166 clear if it is worthwhile to try to handle such RTL. */
6167 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6168 increment = loop_find_equiv_value (loop, increment);
6169
6170 if (GET_CODE (increment) != CONST_INT)
6171 {
6172 if (loop_dump_stream)
6173 {
6174 fprintf (loop_dump_stream,
6175 "Loop iterations: Increment value not constant ");
6176 print_simple_rtl (loop_dump_stream, increment);
6177 fprintf (loop_dump_stream, ".\n");
6178 }
6179 return 0;
6180 }
6181 loop_info->increment = increment;
6182 }
6183
6184 if (GET_CODE (initial_value) != CONST_INT)
6185 {
6186 if (loop_dump_stream)
6187 {
6188 fprintf (loop_dump_stream,
6189 "Loop iterations: Initial value not constant ");
6190 print_simple_rtl (loop_dump_stream, initial_value);
6191 fprintf (loop_dump_stream, ".\n");
6192 }
6193 return 0;
6194 }
6195 else if (GET_CODE (final_value) != CONST_INT)
6196 {
6197 if (loop_dump_stream)
6198 {
6199 fprintf (loop_dump_stream,
6200 "Loop iterations: Final value not constant ");
6201 print_simple_rtl (loop_dump_stream, final_value);
6202 fprintf (loop_dump_stream, ".\n");
6203 }
6204 return 0;
6205 }
6206 else if (comparison_code == EQ)
6207 {
6208 rtx inc_once;
6209
6210 if (loop_dump_stream)
6211 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6212
6213 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6214 GET_MODE (iteration_var));
6215
6216 if (inc_once == final_value)
6217 {
6218 /* The iterator value once through the loop is equal to the
6219 comparison value. Either we have an infinite loop, or
6220 we'll loop twice. */
6221 if (increment == const0_rtx)
6222 return 0;
6223 loop_info->n_iterations = 2;
6224 }
6225 else
6226 loop_info->n_iterations = 1;
6227
6228 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6229 loop_info->final_value
6230 = gen_int_mode ((INTVAL (loop_info->initial_value)
6231 + loop_info->n_iterations * INTVAL (increment)),
6232 GET_MODE (iteration_var));
6233 else
6234 loop_info->final_value
6235 = plus_constant (loop_info->initial_value,
6236 loop_info->n_iterations * INTVAL (increment));
6237 loop_info->final_equiv_value
6238 = gen_int_mode ((INTVAL (initial_value)
6239 + loop_info->n_iterations * INTVAL (increment)),
6240 GET_MODE (iteration_var));
6241 return loop_info->n_iterations;
6242 }
6243
6244 /* Final_larger is 1 if the final value is larger, 0 if they are equal, otherwise -1. */
6245 if (unsigned_p)
6246 final_larger
6247 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6248 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6249 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6250 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6251 else
6252 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6253 - (INTVAL (final_value) < INTVAL (initial_value));
6254
6255 if (INTVAL (increment) > 0)
6256 increment_dir = 1;
6257 else if (INTVAL (increment) == 0)
6258 increment_dir = 0;
6259 else
6260 increment_dir = -1;
6261
6262 /* There are 27 different cases: compare_dir = -1, 0, 1;
6263 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6264 There are 4 normal cases, 4 reverse cases (where the iteration variable
6265 will overflow before the loop exits), 4 infinite loop cases, and 15
6266 immediate exit (0 or 1 iteration depending on loop type) cases.
6267 Only try to optimize the normal cases. */
6268
6269 /* (compare_dir/final_larger/increment_dir)
6270 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6271 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6272 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6273 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
6274
6275 /* ?? If the meaning of reverse loops (where the iteration variable
6276 will overflow before the loop exits) is undefined, then we could
6277 eliminate all of these special checks, and just always assume
6278 the loops are normal/immediate/infinite. Note that this means
6279 the sign of increment_dir does not have to be known. Also,
6280 since it does not really hurt if immediate exit loops or infinite loops
6281 are optimized, then that case could be ignored also, and hence all
6282 loops can be optimized.
6283
6284 According to ANSI Spec, the reverse loop case result is undefined,
6285 because the action on overflow is undefined.
6286
6287 See also the special test for NE loops below. */
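  /* A reverse case, as a hypothetical example: `for (i = 0; i < 10; i--)'
     has compare_dir == 1 and final_larger == 1 but increment_dir == -1;
     the variable can only reach the exit value by wrapping around, so no
     iteration count is computed for it.  */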
6288
6289 if (final_larger == increment_dir && final_larger != 0
6290 && (final_larger == compare_dir || compare_dir == 0))
6291 /* Normal case. */
6292 ;
6293 else
6294 {
6295 if (loop_dump_stream)
6296 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6297 return 0;
6298 }
6299
6300 /* Calculate the number of iterations, final_value is only an approximation,
6301 so correct for that. Note that abs_diff and n_iterations are
6302 unsigned, because they can be as large as 2^n - 1. */
6303
6304 inc = INTVAL (increment);
6305 gcc_assert (inc);
6306 if (inc > 0)
6307 {
6308 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6309 abs_inc = inc;
6310 }
6311 else
6312 {
6313 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6314 abs_inc = -inc;
6315 }
6316
6317 /* Given that iteration_var is going to iterate over its own mode,
6318 not HOST_WIDE_INT, disregard higher bits that might have come
6319 into the picture due to sign extension of initial and final
6320 values. */
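  /* E.g. for a 16-bit iteration variable the mask computed just below is
     0xffff, so a sign-extended difference keeps only its low 16 bits.  */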
6321 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6322 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6323 << 1) - 1;
6324
6325 /* For NE tests, make sure that the iteration variable won't miss
6326 the final value. If abs_diff mod abs_incr is not zero, then the
6327 iteration variable will overflow before the loop exits, and we
6328 cannot calculate the number of iterations. */
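  /* E.g. (illustrative) `for (i = 0; i != 7; i += 2)' has abs_diff == 7
     and abs_inc == 2; 7 % 2 != 0, so the variable steps over the final
     value and the loop terminates only by wrapping, if at all.  */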
6329 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6330 return 0;
6331
6332 /* Note that the number of iterations could be calculated using
6333 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6334 handle potential overflow of the summation. */
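  /* A worked example (illustrative): counting from 0 toward 10 in steps of
     3 executes the body for 0, 3, 6 and 9, i.e. 10 / 3 + 1 == 4 times;
     the formula below adds one exactly when the remainder is nonzero.  */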
6335 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6336 return loop_info->n_iterations;
6337 }
6338
6339 /* Perform strength reduction and induction variable elimination.
6340
6341 Pseudo registers created during this function will be beyond the
6342 last valid index in several tables including
6343 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6344 problem here, because the added registers cannot be givs outside of
6345 their loop, and hence will never be reconsidered. But scan_loop
6346 must check regnos to make sure they are in bounds. */
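/* A source-level sketch of the transformation (hypothetical example; the
   pass itself works on RTL):

	for (i = 0; i < n; i++)		     p = base;
	  sum += *(base + i * 4);     ==>    for (i = 0; i < n; i++, p += 4)
					       sum += *p;

   The giv `i * 4' is replaced by a register that is merely incremented,
   and the biv `i' can then be eliminated if the exit test is rewritten
   in terms of the reduced register.  */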
6347
6348 static void
6349 strength_reduce (struct loop *loop, int flags)
6350 {
6351 struct loop_info *loop_info = LOOP_INFO (loop);
6352 struct loop_regs *regs = LOOP_REGS (loop);
6353 struct loop_ivs *ivs = LOOP_IVS (loop);
6354 rtx p;
6355 /* Temporary list pointer for traversing ivs->list. */
6356 struct iv_class *bl;
6357 /* Ratio of extra register life span we can justify
6358 for saving an instruction. More if loop doesn't call subroutines
6359 since in that case saving an insn makes more difference
6360 and more registers are available. */
6361 /* ??? could set this to last value of threshold in move_movables */
6362 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
6363 /* Map of pseudo-register replacements. */
6364 rtx *reg_map = NULL;
6365 int reg_map_size;
6366 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6367 int insn_count = count_insns_in_loop (loop);
6368
6369 addr_placeholder = gen_reg_rtx (Pmode);
6370
6371 ivs->n_regs = max_reg_before_loop;
6372 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6373
6374 /* Find all BIVs in loop. */
6375 loop_bivs_find (loop);
6376
6377 /* Exit if there are no bivs. */
6378 if (! ivs->list)
6379 {
6380 loop_ivs_free (loop);
6381 return;
6382 }
6383
6384 /* Determine how BIVS are initialized by looking through pre-header
6385 extended basic block. */
6386 loop_bivs_init_find (loop);
6387
6388 /* Look at each biv and see if we can say anything better about its
6389 initial value from any initializing insns set up above. */
6390 loop_bivs_check (loop);
6391
6392 /* Search the loop for general induction variables. */
6393 loop_givs_find (loop);
6394
6395 /* Try to calculate and save the number of loop iterations. This is
6396 set to zero if the actual number cannot be calculated. This must
6397 be called after all giv's have been identified, since otherwise it may
6398 fail if the iteration variable is a giv. */
6399 loop_iterations (loop);
6400
6401 #ifdef HAVE_prefetch
6402 if (flags & LOOP_PREFETCH)
6403 emit_prefetch_instructions (loop);
6404 #endif
6405
6406 /* Now for each giv for which we still don't know whether or not it is
6407 replaceable, check to see if it is replaceable because its final value
6408 can be calculated. This must be done after loop_iterations is called,
6409 so that final_giv_value will work correctly. */
6410 loop_givs_check (loop);
6411
6412 /* Try to prove that the loop counter variable (if any) is always
6413 nonnegative; if so, record that fact with a REG_NONNEG note
6414 so that a "decrement and branch until zero" insn can be used. */
6415 check_dbra_loop (loop, insn_count);
6416
6417 /* Create reg_map to hold substitutions for replaceable giv regs.
6418 Some givs might have been made from biv increments, so look at
6419 ivs->reg_iv_type for a suitable size. */
6420 reg_map_size = ivs->n_regs;
6421 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6422
6423 /* Examine each iv class for feasibility of strength reduction/induction
6424 variable elimination. */
6425
6426 for (bl = ivs->list; bl; bl = bl->next)
6427 {
6428 struct induction *v;
6429 int benefit;
6430
6431 /* Test whether it will be possible to eliminate this biv
6432 provided all givs are reduced. */
6433 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6434
6435 /* This will be true at the end, if all givs which depend on this
6436 biv have been strength reduced.
6437 We can't (currently) eliminate the biv unless this is so. */
6438 bl->all_reduced = 1;
6439
6440 /* Check each extension dependent giv in this class to see if its
6441 root biv is safe from wrapping in the interior mode. */
6442 check_ext_dependent_givs (loop, bl);
6443
6444 /* Combine all giv's for this iv_class. */
6445 combine_givs (regs, bl);
6446
6447 for (v = bl->giv; v; v = v->next_iv)
6448 {
6449 struct induction *tv;
6450
6451 if (v->ignore || v->same)
6452 continue;
6453
6454 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6455
6456 /* If an insn is not to be strength reduced, then set its ignore
6457 flag, and clear bl->all_reduced. */
6458
6459 /* A giv that depends on a reversed biv must be reduced if it is
6460 used after the loop exit, otherwise, it would have the wrong
6461 value after the loop exit. To make it simple, just reduce all
6462 such giv's whether or not we know they are used after the loop
6463 exit. */
6464
6465 if (v->lifetime * threshold * benefit < insn_count
6466 && ! bl->reversed)
6467 {
6468 if (loop_dump_stream)
6469 fprintf (loop_dump_stream,
6470 "giv of insn %d not worth while, %d vs %d.\n",
6471 INSN_UID (v->insn),
6472 v->lifetime * threshold * benefit, insn_count);
6473 v->ignore = 1;
6474 bl->all_reduced = 0;
6475 }
6476 else
6477 {
6478 /* Check that we can increment the reduced giv without a
6479 multiply insn. If not, reject it. */
6480
6481 for (tv = bl->biv; tv; tv = tv->next_iv)
6482 if (tv->mult_val == const1_rtx
6483 && ! product_cheap_p (tv->add_val, v->mult_val))
6484 {
6485 if (loop_dump_stream)
6486 fprintf (loop_dump_stream,
6487 "giv of insn %d: would need a multiply.\n",
6488 INSN_UID (v->insn));
6489 v->ignore = 1;
6490 bl->all_reduced = 0;
6491 break;
6492 }
6493 }
6494 }
6495
6496 /* Check for givs whose first use is their definition and whose
6497 last use is the definition of another giv. If so, it is likely
6498 dead and should not be used to derive another giv nor to
6499 eliminate a biv. */
6500 loop_givs_dead_check (loop, bl);
6501
6502 /* Reduce each giv that we decided to reduce. */
6503 loop_givs_reduce (loop, bl);
6504
6505 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6506 as not reduced.
6507
6508 For each giv register that can be reduced now: if replaceable,
6509 substitute reduced reg wherever the old giv occurs;
6510 else add new move insn "giv_reg = reduced_reg". */
6511 loop_givs_rescan (loop, bl, reg_map);
6512
6513 /* All the givs based on the biv bl have been reduced if they
6514 merit it. */
6515
6516 /* For each giv not marked as maybe dead that has been combined with a
6517 second giv, clear any "maybe dead" mark on that second giv.
6518 v->new_reg will either be or refer to the register of the giv it
6519 combined with.
6520
6521 Doing this clearing avoids problems in biv elimination where
6522 a giv's new_reg is a complex value that can't be put in the
6523 insn but the giv combined with (with a reg as new_reg) is
6524 marked maybe_dead. Since the register will be used in either
6525 case, we'd prefer it be used from the simpler giv. */
6526
6527 for (v = bl->giv; v; v = v->next_iv)
6528 if (! v->maybe_dead && v->same)
6529 v->same->maybe_dead = 0;
6530
6531 /* Try to eliminate the biv, if it is a candidate.
6532 This won't work if ! bl->all_reduced,
6533 since the givs we planned to use might not have been reduced.
6534
6535 We have to be careful that we didn't initially think we could
6536 eliminate this biv because of a giv that we now think may be
6537 dead and shouldn't be used as a biv replacement.
6538
6539 Also, there is the possibility that we may have a giv that looks
6540 like it can be used to eliminate a biv, but the resulting insn
6541 isn't valid. This can happen, for example, on the 88k, where a
6542 JUMP_INSN can compare a register only with zero. Attempts to
6543 replace it with a compare with a constant will fail.
6544
6545 Note that in cases where this call fails, we may have replaced some
6546 of the occurrences of the biv with a giv, but no harm was done in
6547 doing so in the rare cases where it can occur. */
6548
6549 if (bl->all_reduced == 1 && bl->eliminable
6550 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6551 {
6552 /* ?? If we created a new test to bypass the loop entirely,
6553 or otherwise drop straight in, based on this test, then
6554 we might want to rewrite it also. This way some later
6555 pass has more hope of removing the initialization of this
6556 biv entirely. */
6557
6558 /* If final_value != 0, then the biv may be used after loop end
6559 and we must emit an insn to set it just in case.
6560
6561 Reversed bivs already have an insn after the loop setting their
6562 value, so we don't need another one. We can't calculate the
6563 proper final value for such a biv here anyway. */
6564 if (bl->final_value && ! bl->reversed)
6565 loop_insn_sink_or_swim (loop,
6566 gen_load_of_final_value (bl->biv->dest_reg,
6567 bl->final_value));
6568
6569 if (loop_dump_stream)
6570 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6571 bl->regno);
6572 }
6573 /* See above note wrt final_value. But since we couldn't eliminate
6574 the biv, we must set the value after the loop instead of before. */
6575 else if (bl->final_value && ! bl->reversed)
6576 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6577 bl->final_value));
6578 }
6579
6580 /* Go through all the instructions in the loop, making all the
6581 register substitutions scheduled in REG_MAP. */
6582
6583 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6584 if (INSN_P (p))
6585 {
6586 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6587 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6588 INSN_CODE (p) = -1;
6589 }
6590
6591 if (loop_dump_stream)
6592 fprintf (loop_dump_stream, "\n");
6593
6594 loop_ivs_free (loop);
6595 if (reg_map)
6596 free (reg_map);
6597 }
6598 \f
6599 /* Record all basic induction variables calculated in the insn. */
6600 static rtx
6601 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6602 int maybe_multiple)
6603 {
6604 struct loop_ivs *ivs = LOOP_IVS (loop);
6605 rtx set;
6606 rtx dest_reg;
6607 rtx inc_val;
6608 rtx mult_val;
6609 rtx *location;
6610
6611 if (NONJUMP_INSN_P (p)
6612 && (set = single_set (p))
6613 && REG_P (SET_DEST (set)))
6614 {
6615 dest_reg = SET_DEST (set);
6616 if (REGNO (dest_reg) < max_reg_before_loop
6617 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6618 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6619 {
6620 if (basic_induction_var (loop, SET_SRC (set),
6621 GET_MODE (SET_SRC (set)),
6622 dest_reg, p, &inc_val, &mult_val,
6623 &location))
6624 {
6625 /* It is a possible basic induction variable.
6626 Create and initialize an induction structure for it. */
6627
6628 struct induction *v = xmalloc (sizeof (struct induction));
6629
6630 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6631 not_every_iteration, maybe_multiple);
6632 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6633 }
6634 else if (REGNO (dest_reg) < ivs->n_regs)
6635 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6636 }
6637 }
6638 return p;
6639 }
6640 \f
6641 /* Record all givs calculated in the insn.
6642 A register is a giv if: it is only set once, it is a function of a
6643 biv and a constant (or invariant), and it is not a biv. */
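/* E.g. (hypothetical) if `i' is a biv, then an insn computing
   `j = i * 4 + 8' makes `j' a giv with mult_val 4 and add_val 8.  */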
6644 static rtx
6645 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6646 int maybe_multiple)
6647 {
6648 struct loop_regs *regs = LOOP_REGS (loop);
6649
6650 rtx set;
6651 /* Look for a general induction variable in a register. */
6652 if (NONJUMP_INSN_P (p)
6653 && (set = single_set (p))
6654 && REG_P (SET_DEST (set))
6655 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6656 {
6657 rtx src_reg;
6658 rtx dest_reg;
6659 rtx add_val;
6660 rtx mult_val;
6661 rtx ext_val;
6662 int benefit;
6663 rtx regnote = 0;
6664 rtx last_consec_insn;
6665
6666 dest_reg = SET_DEST (set);
6667 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6668 return p;
6669
6670 if (/* SET_SRC is a giv. */
6671 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6672 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6673 /* Equivalent expression is a giv. */
6674 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6675 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6676 &add_val, &mult_val, &ext_val, 0,
6677 &benefit, VOIDmode)))
6678 /* Don't try to handle any regs made by loop optimization.
6679 We have nothing on them in regno_first_uid, etc. */
6680 && REGNO (dest_reg) < max_reg_before_loop
6681 /* Don't recognize a BASIC_INDUCT_VAR here. */
6682 && dest_reg != src_reg
6683 /* This must be the only place where the register is set. */
6684 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6685 /* or all sets must be consecutive and make a giv. */
6686 || (benefit = consec_sets_giv (loop, benefit, p,
6687 src_reg, dest_reg,
6688 &add_val, &mult_val, &ext_val,
6689 &last_consec_insn))))
6690 {
6691 struct induction *v = xmalloc (sizeof (struct induction));
6692
6693 /* If this is a library call, increase benefit. */
6694 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6695 benefit += libcall_benefit (p);
6696
6697 /* Skip the consecutive insns, if there are any. */
6698 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6699 p = last_consec_insn;
6700
6701 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6702 ext_val, benefit, DEST_REG, not_every_iteration,
6703 maybe_multiple, (rtx*) 0);
6704
6705 }
6706 }
6707
6708 /* Look for givs which are memory addresses. */
6709 if (NONJUMP_INSN_P (p))
6710 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6711 maybe_multiple);
6712
6713 /* Update the status of whether giv can derive other givs. This can
6714 change when we pass a label or an insn that updates a biv. */
6715 if (INSN_P (p) || LABEL_P (p))
6716 update_giv_derive (loop, p);
6717 return p;
6718 }
6719 \f
6720 /* Return 1 if X is a valid source for an initial value (or as value being
6721 compared against in an initial test).
6722
6723 X must be either a register or constant and must not be clobbered between
6724 the current insn and the start of the loop.
6725
6726 INSN is the insn containing X. */
6727
6728 static int
6729 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6730 {
6731 if (CONSTANT_P (x))
6732 return 1;
6733
6734 /* Only consider pseudos we know about initialized in insns whose luids
6735 we know. */
6736 if (!REG_P (x)
6737 || REGNO (x) >= max_reg_before_loop)
6738 return 0;
6739
6740 /* Don't use call-clobbered registers across a call which clobbers it. On
6741 some machines, don't use any hard registers at all. */
6742 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6743 && (SMALL_REGISTER_CLASSES
6744 || (call_seen && call_used_regs[REGNO (x)])))
6745 return 0;
6746
6747 /* Don't use registers that have been clobbered before the start of the
6748 loop. */
6749 if (reg_set_between_p (x, insn, loop_start))
6750 return 0;
6751
6752 return 1;
6753 }
6754 \f
6755 /* Scan X for memory refs and check each memory address
6756 as a possible giv. INSN is the insn whose pattern X comes from.
6757 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6758 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6759 more than once in each loop iteration. */
6760
6761 static void
6762 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6763 int not_every_iteration, int maybe_multiple)
6764 {
6765 int i, j;
6766 enum rtx_code code;
6767 const char *fmt;
6768
6769 if (x == 0)
6770 return;
6771
6772 code = GET_CODE (x);
6773 switch (code)
6774 {
6775 case REG:
6776 case CONST_INT:
6777 case CONST:
6778 case CONST_DOUBLE:
6779 case SYMBOL_REF:
6780 case LABEL_REF:
6781 case PC:
6782 case CC0:
6783 case ADDR_VEC:
6784 case ADDR_DIFF_VEC:
6785 case USE:
6786 case CLOBBER:
6787 return;
6788
6789 case MEM:
6790 {
6791 rtx src_reg;
6792 rtx add_val;
6793 rtx mult_val;
6794 rtx ext_val;
6795 int benefit;
6796
6797 /* This code used to disable creating GIVs with mult_val == 1 and
6798 add_val == 0. However, this leads to lost optimizations when
6799 it comes time to combine a set of related DEST_ADDR GIVs, since
6800 this one would not be seen. */
6801
6802 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6803 &mult_val, &ext_val, 1, &benefit,
6804 GET_MODE (x)))
6805 {
6806 /* Found one; record it. */
6807 struct induction *v = xmalloc (sizeof (struct induction));
6808
6809 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6810 add_val, ext_val, benefit, DEST_ADDR,
6811 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6812
6813 v->mem = x;
6814 }
6815 }
6816 return;
6817
6818 default:
6819 break;
6820 }
6821
6822 /* Recursively scan the subexpressions for other mem refs. */
6823
6824 fmt = GET_RTX_FORMAT (code);
6825 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6826 if (fmt[i] == 'e')
6827 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6828 maybe_multiple);
6829 else if (fmt[i] == 'E')
6830 for (j = 0; j < XVECLEN (x, i); j++)
6831 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6832 maybe_multiple);
6833 }
6834 \f
6835 /* Fill in the data about one biv update.
6836 V is the `struct induction' in which we record the biv. (It is
6837 allocated by the caller, with alloca.)
6838 INSN is the insn that sets it.
6839 DEST_REG is the biv's reg.
6840
6841 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6842 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6843 being set to INC_VAL.
6844
6845 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6846 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6847 can be executed more than once per iteration. If MAYBE_MULTIPLE
6848 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6849 executed exactly once per iteration. */
6850
6851 static void
6852 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6853 rtx inc_val, rtx mult_val, rtx *location,
6854 int not_every_iteration, int maybe_multiple)
6855 {
6856 struct loop_ivs *ivs = LOOP_IVS (loop);
6857 struct iv_class *bl;
6858
6859 v->insn = insn;
6860 v->src_reg = dest_reg;
6861 v->dest_reg = dest_reg;
6862 v->mult_val = mult_val;
6863 v->add_val = inc_val;
6864 v->ext_dependent = NULL_RTX;
6865 v->location = location;
6866 v->mode = GET_MODE (dest_reg);
6867 v->always_computable = ! not_every_iteration;
6868 v->always_executed = ! not_every_iteration;
6869 v->maybe_multiple = maybe_multiple;
6870 v->same = 0;
6871
6872 /* Add this to the reg's iv_class, creating a class
6873 if this is the first incrementation of the reg. */
6874
6875 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6876 if (bl == 0)
6877 {
6878 /* Create and initialize new iv_class. */
6879
6880 bl = xmalloc (sizeof (struct iv_class));
6881
6882 bl->regno = REGNO (dest_reg);
6883 bl->biv = 0;
6884 bl->giv = 0;
6885 bl->biv_count = 0;
6886 bl->giv_count = 0;
6887
6888 /* Set initial value to the reg itself. */
6889 bl->initial_value = dest_reg;
6890 bl->final_value = 0;
6891 /* We haven't seen the initializing insn yet. */
6892 bl->init_insn = 0;
6893 bl->init_set = 0;
6894 bl->initial_test = 0;
6895 bl->incremented = 0;
6896 bl->eliminable = 0;
6897 bl->nonneg = 0;
6898 bl->reversed = 0;
6899 bl->total_benefit = 0;
6900
6901 /* Add this class to ivs->list. */
6902 bl->next = ivs->list;
6903 ivs->list = bl;
6904
6905 /* Put it in the array of biv register classes. */
6906 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6907 }
6908 else
6909 {
6910 /* Check if location is the same as a previous one. */
6911 struct induction *induction;
6912 for (induction = bl->biv; induction; induction = induction->next_iv)
6913 if (location == induction->location)
6914 {
6915 v->same = induction;
6916 break;
6917 }
6918 }
6919
6920 /* Update IV_CLASS entry for this biv. */
6921 v->next_iv = bl->biv;
6922 bl->biv = v;
6923 bl->biv_count++;
6924 if (mult_val == const1_rtx)
6925 bl->incremented = 1;
6926
6927 if (loop_dump_stream)
6928 loop_biv_dump (v, loop_dump_stream, 0);
6929 }
6930 \f
6931 /* Fill in the data about one giv.
6932 V is the `struct induction' in which we record the giv. (It is
6933 allocated by the caller, with xmalloc.)
6934 INSN is the insn that sets it.
6935 BENEFIT estimates the savings from deleting this insn.
6936 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6937 into a register or is used as a memory address.
6938
6939 SRC_REG is the biv reg which the giv is computed from.
6940 DEST_REG is the giv's reg (if the giv is stored in a reg).
6941 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6942 LOCATION points to the place where this giv's value appears in INSN. */
6943
6944 static void
6945 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6946 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6947 rtx ext_val, int benefit, enum g_types type,
6948 int not_every_iteration, int maybe_multiple, rtx *location)
6949 {
6950 struct loop_ivs *ivs = LOOP_IVS (loop);
6951 struct induction *b;
6952 struct iv_class *bl;
6953 rtx set = single_set (insn);
6954 rtx temp;
6955
6956 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6957 undo the MULT canonicalization that we performed earlier. */
6958 temp = simplify_rtx (add_val);
6959 if (temp
6960 && ! (GET_CODE (add_val) == MULT
6961 && GET_CODE (temp) == ASHIFT))
6962 add_val = temp;
6963
6964 v->insn = insn;
6965 v->src_reg = src_reg;
6966 v->giv_type = type;
6967 v->dest_reg = dest_reg;
6968 v->mult_val = mult_val;
6969 v->add_val = add_val;
6970 v->ext_dependent = ext_val;
6971 v->benefit = benefit;
6972 v->location = location;
6973 v->cant_derive = 0;
6974 v->combined_with = 0;
6975 v->maybe_multiple = maybe_multiple;
6976 v->maybe_dead = 0;
6977 v->derive_adjustment = 0;
6978 v->same = 0;
6979 v->ignore = 0;
6980 v->new_reg = 0;
6981 v->final_value = 0;
6982 v->same_insn = 0;
6983 v->auto_inc_opt = 0;
6984 v->shared = 0;
6985
6986 /* The v->always_computable field is used in update_giv_derive, to
6987 determine whether a giv can be used to derive another giv. For a
6988 DEST_REG giv, INSN computes a new value for the giv, so its value
6989 isn't computable if INSN isn't executed every iteration.
6990 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6991 it does not compute a new value. Hence the value is always computable
6992 regardless of whether INSN is executed each iteration. */
6993
6994 if (type == DEST_ADDR)
6995 v->always_computable = 1;
6996 else
6997 v->always_computable = ! not_every_iteration;
6998
6999 v->always_executed = ! not_every_iteration;
7000
7001 if (type == DEST_ADDR)
7002 {
7003 v->mode = GET_MODE (*location);
7004 v->lifetime = 1;
7005 }
7006 else /* type == DEST_REG */
7007 {
7008 v->mode = GET_MODE (SET_DEST (set));
7009
7010 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
7011
7012 /* If the lifetime is zero, it means that this register is
7013 really a dead store. So mark this as a giv that can be
7014 ignored. This will not prevent the biv from being eliminated. */
7015 if (v->lifetime == 0)
7016 v->ignore = 1;
7017
7018 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7019 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7020 }
7021
7022 /* Add the giv to the class of givs computed from one biv. */
7023
7024 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
7025 gcc_assert (bl);
7026 v->next_iv = bl->giv;
7027 bl->giv = v;
7028
7029 /* Don't count DEST_ADDR. This is supposed to count the number of
7030 insns that calculate givs. */
7031 if (type == DEST_REG)
7032 bl->giv_count++;
7033 bl->total_benefit += benefit;
7034
7035 if (type == DEST_ADDR)
7036 {
7037 v->replaceable = 1;
7038 v->not_replaceable = 0;
7039 }
7040 else
7041 {
7042 /* The giv can be replaced outright by the reduced register only if all
7043 of the following conditions are true:
7044 - the insn that sets the giv is always executed on any iteration
7045 on which the giv is used at all
7046 (there are two ways to deduce this:
7047 either the insn is executed on every iteration,
7048 or all uses follow that insn in the same basic block),
7049 - the giv is not used outside the loop
7050 - no assignments to the biv occur during the giv's lifetime. */
7051
7052 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7053 /* Previous line always fails if INSN was moved by loop opt. */
7054 && REGNO_LAST_LUID (REGNO (dest_reg))
7055 < INSN_LUID (loop->end)
7056 && (! not_every_iteration
7057 || last_use_this_basic_block (dest_reg, insn)))
7058 {
7059 /* Now check that there are no assignments to the biv within the
7060 giv's lifetime. This requires two separate checks. */
7061
7062 /* Check each biv update, and fail if any are between the first
7063 and last use of the giv.
7064
7065 If this loop contains an inner loop that was unrolled, then
7066 the insn modifying the biv may have been emitted by the loop
7067 unrolling code, and hence does not have a valid luid. Just
7068 mark the biv as not replaceable in this case. It is not very
7069 useful as a biv, because it is used in two different loops.
7070 It is very unlikely that we would be able to optimize the giv
7071 using this biv anyway. */
7072
7073 v->replaceable = 1;
7074 v->not_replaceable = 0;
7075 for (b = bl->biv; b; b = b->next_iv)
7076 {
7077 if (INSN_UID (b->insn) >= max_uid_for_loop
7078 || ((INSN_LUID (b->insn)
7079 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7080 && (INSN_LUID (b->insn)
7081 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7082 {
7083 v->replaceable = 0;
7084 v->not_replaceable = 1;
7085 break;
7086 }
7087 }
7088
7089 /* If there are any backwards branches that go from after the
7090 biv update to before it, then this giv is not replaceable. */
7091 if (v->replaceable)
7092 for (b = bl->biv; b; b = b->next_iv)
7093 if (back_branch_in_range_p (loop, b->insn))
7094 {
7095 v->replaceable = 0;
7096 v->not_replaceable = 1;
7097 break;
7098 }
7099 }
7100 else
7101 {
7102 /* May still be replaceable; we don't have enough info here to
7103 decide. */
7104 v->replaceable = 0;
7105 v->not_replaceable = 0;
7106 }
7107 }
7108
7109 /* Record whether the add_val contains a const_int, for later use by
7110 combine_givs. */
7111 {
7112 rtx tem = add_val;
7113
7114 v->no_const_addval = 1;
7115 if (tem == const0_rtx)
7116 ;
7117 else if (CONSTANT_P (add_val))
7118 v->no_const_addval = 0;
7119 if (GET_CODE (tem) == PLUS)
7120 {
7121 while (1)
7122 {
7123 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7124 tem = XEXP (tem, 0);
7125 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7126 tem = XEXP (tem, 1);
7127 else
7128 break;
7129 }
7130 if (CONSTANT_P (XEXP (tem, 1)))
7131 v->no_const_addval = 0;
7132 }
7133 }
7134
7135 if (loop_dump_stream)
7136 loop_giv_dump (v, loop_dump_stream, 0);
7137 }
7138
7139 /* Try to calculate the final value of the giv, the value it will have at
7140 the end of the loop. If we can do it, return that value. */
7141
7142 static rtx
7143 final_giv_value (const struct loop *loop, struct induction *v)
7144 {
7145 struct loop_ivs *ivs = LOOP_IVS (loop);
7146 struct iv_class *bl;
7147 rtx insn;
7148 rtx increment, tem;
7149 rtx seq;
7150 rtx loop_end = loop->end;
7151 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7152
7153 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7154
7155 /* The final value for givs which depend on reversed bivs must be calculated
7156 differently than for ordinary givs. In this case, there is already an
7157 insn after the loop which sets this giv's final value (if necessary),
7158 and there are no other loop exits, so we can return any value. */
7159 if (bl->reversed)
7160 {
7161 if (loop_dump_stream)
7162 fprintf (loop_dump_stream,
7163 "Final giv value for %d, depends on reversed biv\n",
7164 REGNO (v->dest_reg));
7165 return const0_rtx;
7166 }
7167
7168 /* Try to calculate the final value as a function of the biv it depends
7169 upon. The only exit from the loop must be the fall through at the bottom
7170 and the insn that sets the giv must be executed on every iteration
7171 (otherwise the giv may not have its final value when the loop exits). */
7172
7173 /* ??? Can calculate the final giv value by subtracting off the
7174 extra biv increments times the giv's mult_val. The loop must have
7175 only one exit for this to work, but the number of loop iterations does not need
7176 to be known. */
7177
7178 if (n_iterations != 0
7179 && ! loop->exit_count
7180 && v->always_executed)
7181 {
7182 /* ?? It is tempting to use the biv's value here since these insns will
7183 be put after the loop, and hence the biv will have its final value
7184 then. However, this fails if the biv is subsequently eliminated.
7185 Perhaps determine whether biv's are eliminable before trying to
7186 determine whether giv's are replaceable so that we can use the
7187 biv value here if it is not eliminable. */
7188
7189 /* We are emitting code after the end of the loop, so we must make
7190 sure that bl->initial_value is still valid then. It will still
7191 be valid if it is invariant. */
7192
7193 increment = biv_total_increment (bl);
7194
7195 if (increment && loop_invariant_p (loop, increment)
7196 && loop_invariant_p (loop, bl->initial_value))
7197 {
7198 /* Can calculate the loop exit value of its biv as
7199 (n_iterations * increment) + initial_value */
7200
7201 /* The loop exit value of the giv is then
7202 (final_biv_value - extra increments) * mult_val + add_val.
7203 The extra increments are any increments to the biv which
7204 occur in the loop after the giv's value is calculated.
7205 We must search from the insn that sets the giv to the end
7206 of the loop to calculate this value. */
7207
7208 /* Put the final biv value in tem. */
7209 tem = gen_reg_rtx (v->mode);
7210 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7211 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7212 GEN_INT (n_iterations),
7213 extend_value_for_giv (v, bl->initial_value),
7214 tem);
7215
7216 /* Subtract off extra increments as we find them. */
7217 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7218 insn = NEXT_INSN (insn))
7219 {
7220 struct induction *biv;
7221
7222 for (biv = bl->biv; biv; biv = biv->next_iv)
7223 if (biv->insn == insn)
7224 {
7225 start_sequence ();
7226 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7227 biv->add_val, NULL_RTX, 0,
7228 OPTAB_LIB_WIDEN);
7229 seq = get_insns ();
7230 end_sequence ();
7231 loop_insn_sink (loop, seq);
7232 }
7233 }
7234
7235 /* Now calculate the giv's final value. */
7236 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7237
7238 if (loop_dump_stream)
7239 fprintf (loop_dump_stream,
7240 "Final giv value for %d, calc from biv's value.\n",
7241 REGNO (v->dest_reg));
7242
7243 return tem;
7244 }
7245 }
7246
7247 /* Replaceable giv's should never reach here. */
7248 gcc_assert (!v->replaceable);
7249
7250 /* Check to see if the biv is dead at all loop exits. */
7251 if (reg_dead_after_loop (loop, v->dest_reg))
7252 {
7253 if (loop_dump_stream)
7254 fprintf (loop_dump_stream,
7255 "Final giv value for %d, giv dead after loop exit.\n",
7256 REGNO (v->dest_reg));
7257
7258 return const0_rtx;
7259 }
7260
7261 return 0;
7262 }
7263
7264 /* All this does is determine whether a giv can be made replaceable because
7265 its final value can be calculated. This code cannot be part of record_giv
7266 above, because final_giv_value requires that the number of loop iterations
7267 be known, and that cannot be accurately calculated until after all givs
7268 have been identified. */
7269
7270 static void
7271 check_final_value (const struct loop *loop, struct induction *v)
7272 {
7273 rtx final_value = 0;
7274
7275 /* DEST_ADDR givs will never reach here, because they are always marked
7276 replaceable above in record_giv. */
7277
7278 /* The giv can be replaced outright by the reduced register only if all
7279 of the following conditions are true:
7280 - the insn that sets the giv is always executed on any iteration
7281 on which the giv is used at all
7282 (there are two ways to deduce this:
7283 either the insn is executed on every iteration,
7284 or all uses follow that insn in the same basic block),
7285 - its final value can be calculated (this condition is different
7286 from the one above in record_giv)
7287 - it is not used before it is set
7288 - no assignments to the biv occur during the giv's lifetime. */
7289
7290 #if 0
7291 /* This is only called now when replaceable is known to be false. */
7292 /* Clear replaceable, so that it won't confuse final_giv_value. */
7293 v->replaceable = 0;
7294 #endif
7295
7296 if ((final_value = final_giv_value (loop, v))
7297 && (v->always_executed
7298 || last_use_this_basic_block (v->dest_reg, v->insn)))
7299 {
7300 int biv_increment_seen = 0, before_giv_insn = 0;
7301 rtx p = v->insn;
7302 rtx last_giv_use;
7303
7304 v->replaceable = 1;
7305 v->not_replaceable = 0;
7306
7307 /* When trying to determine whether or not a biv increment occurs
7308 during the lifetime of the giv, we can ignore uses of the variable
7309 outside the loop because final_value is nonzero. Hence we cannot
7310 use regno_last_uid and regno_first_uid as above in record_giv. */
7311
7312 /* Search the loop to determine whether any assignments to the
7313 biv occur during the giv's lifetime. Start with the insn
7314 that sets the giv, and search around the loop until we come
7315 back to that insn again.
7316
7317 Also fail if there is a jump within the giv's lifetime that jumps
7318 to somewhere outside the lifetime but still within the loop. This
7319 catches spaghetti code where the execution order is not linear, and
7320 hence the above test fails. Here we assume that the giv lifetime
7321 does not extend from one iteration of the loop to the next, so as
7322 to make the test easier. Since the lifetime isn't known yet,
7323 this requires two loops. See also record_giv above. */
7324
7325 last_giv_use = v->insn;
7326
7327 while (1)
7328 {
7329 p = NEXT_INSN (p);
7330 if (p == loop->end)
7331 {
7332 before_giv_insn = 1;
7333 p = NEXT_INSN (loop->start);
7334 }
7335 if (p == v->insn)
7336 break;
7337
7338 if (INSN_P (p))
7339 {
7340 /* It is possible for the BIV increment to use the GIV if we
7341 have a cycle. Thus we must be sure to check each insn for
7342 both BIV and GIV uses, and we must check for BIV uses
7343 first. */
7344
7345 if (! biv_increment_seen
7346 && reg_set_p (v->src_reg, PATTERN (p)))
7347 biv_increment_seen = 1;
7348
7349 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7350 {
7351 if (biv_increment_seen || before_giv_insn)
7352 {
7353 v->replaceable = 0;
7354 v->not_replaceable = 1;
7355 break;
7356 }
7357 last_giv_use = p;
7358 }
7359 }
7360 }
7361
7362 /* Now that the lifetime of the giv is known, check for branches
7363 from within the lifetime to outside the lifetime if it is still
7364 replaceable. */
7365
7366 if (v->replaceable)
7367 {
7368 p = v->insn;
7369 while (1)
7370 {
7371 p = NEXT_INSN (p);
7372 if (p == loop->end)
7373 p = NEXT_INSN (loop->start);
7374 if (p == last_giv_use)
7375 break;
7376
7377 if (JUMP_P (p) && JUMP_LABEL (p)
7378 && LABEL_NAME (JUMP_LABEL (p))
7379 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7380 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7381 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7382 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7383 {
7384 v->replaceable = 0;
7385 v->not_replaceable = 1;
7386
7387 if (loop_dump_stream)
7388 fprintf (loop_dump_stream,
7389 "Found branch outside giv lifetime.\n");
7390
7391 break;
7392 }
7393 }
7394 }
7395
7396 /* If it is replaceable, then save the final value. */
7397 if (v->replaceable)
7398 v->final_value = final_value;
7399 }
7400
7401 if (loop_dump_stream && v->replaceable)
7402 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7403 INSN_UID (v->insn), REGNO (v->dest_reg));
7404 }
7405 \f
7406 /* Update the status of whether a giv can derive other givs.
7407
7408 We need to do something special if there is or may be an update to the biv
7409 between the time the giv is defined and the time it is used to derive
7410 another giv.
7411
7412 In addition, a giv that is only conditionally set is not allowed to
7413 derive another giv once a label has been passed.
7414
7415 The cases we look at are when a label or an update to a biv is passed. */
7416
7417 static void
7418 update_giv_derive (const struct loop *loop, rtx p)
7419 {
7420 struct loop_ivs *ivs = LOOP_IVS (loop);
7421 struct iv_class *bl;
7422 struct induction *biv, *giv;
7423 rtx tem;
7424 int dummy;
7425
7426 /* Search all IV classes, then all bivs, and finally all givs.
7427
7428 There are three cases we are concerned with. First we have the situation
7429 of a giv that is only updated conditionally. In that case, it may not
7430 derive any givs after a label is passed.
7431
7432 The second case is when a biv update occurs, or may occur, after the
7433 definition of a giv. For certain biv updates (see below) that are
7434 known to occur between the giv definition and use, we can adjust the
7435 giv definition. For others, or when the biv update is conditional,
7436 we must prevent the giv from deriving any other givs. There are two
7437 sub-cases within this case.
7438
7439 If this is a label, we are concerned with any biv update that is done
7440 conditionally, since it may be done after the giv is defined followed by
7441 a branch here (actually, we need to pass both a jump and a label, but
7442 this extra tracking doesn't seem worth it).
7443
7444 If this is a jump, we are concerned about any biv update that may be
7445 executed multiple times. We are actually only concerned about
7446 backward jumps, but it is probably not worth performing the test
7447 on the jump again here.
7448
7449 If this is a biv update, we must adjust the giv status to show that a
7450 subsequent biv update was performed. If this adjustment cannot be done,
7451 the giv cannot derive further givs. */
7452
7453 for (bl = ivs->list; bl; bl = bl->next)
7454 for (biv = bl->biv; biv; biv = biv->next_iv)
7455 if (LABEL_P (p) || JUMP_P (p)
7456 || biv->insn == p)
7457 {
7458 /* Skip if location is the same as a previous one. */
7459 if (biv->same)
7460 continue;
7461
7462 for (giv = bl->giv; giv; giv = giv->next_iv)
7463 {
7464 /* If cant_derive is already true, there is no point in
7465 checking all of these conditions again. */
7466 if (giv->cant_derive)
7467 continue;
7468
7469 /* If this giv is conditionally set and we have passed a label,
7470 it cannot derive anything. */
7471 if (LABEL_P (p) && ! giv->always_computable)
7472 giv->cant_derive = 1;
7473
7474 /* Skip givs that have mult_val == 0, since
7475 they are really invariants. Also skip those that are
7476 replaceable, since we know their lifetime doesn't contain
7477 any biv update. */
7478 else if (giv->mult_val == const0_rtx || giv->replaceable)
7479 continue;
7480
7481 /* The only way we can allow this giv to derive another
7482 is if this is a biv increment and we can form the product
7483 of biv->add_val and giv->mult_val. In this case, we will
7484 be able to compute a compensation. */
7485 else if (biv->insn == p)
7486 {
7487 rtx ext_val_dummy;
7488
7489 tem = 0;
7490 if (biv->mult_val == const1_rtx)
7491 tem = simplify_giv_expr (loop,
7492 gen_rtx_MULT (giv->mode,
7493 biv->add_val,
7494 giv->mult_val),
7495 &ext_val_dummy, &dummy);
7496
7497 if (tem && giv->derive_adjustment)
7498 tem = simplify_giv_expr
7499 (loop,
7500 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7501 &ext_val_dummy, &dummy);
7502
7503 if (tem)
7504 giv->derive_adjustment = tem;
7505 else
7506 giv->cant_derive = 1;
7507 }
7508 else if ((LABEL_P (p) && ! biv->always_computable)
7509 || (JUMP_P (p) && biv->maybe_multiple))
7510 giv->cant_derive = 1;
7511 }
7512 }
7513 }
7514 \f
7515 /* Check whether an insn is an increment legitimate for a basic induction var.
7516 X is the source of insn P, or a part of it.
7517 MODE is the mode in which X should be interpreted.
7518
7519 DEST_REG is the putative biv, also the destination of the insn.
7520 We accept patterns of these forms:
7521 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7522 REG = INVARIANT + REG
7523
7524 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7525 store the additive term into *INC_VAL, and store the place where
7526 we found the additive term into *LOCATION.
7527
7528 If X is an assignment of an invariant into DEST_REG, we set
7529 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7530
7531 We also want to detect a BIV when it corresponds to a variable
7532 whose mode was promoted. In that case, an increment
7533 of the variable may be a PLUS that adds a SUBREG of that variable to
7534 an invariant and then sign- or zero-extends the result of the PLUS
7535 into the variable.
7536
7537 Most GIVs in such cases will be in the promoted mode, since that is
7538 probably the natural computation mode (and almost certainly the mode
7539 used for addresses) on the machine. So we view the pseudo-reg containing
7540 the variable as the BIV, as if it were simply incremented.
7541
7542 Note that treating the entire pseudo as a BIV will result in making
7543 simple increments to any GIVs based on it. However, if the variable
7544 overflows in its declared mode but not its promoted mode, the result will
7545 be incorrect. This is acceptable if the variable is signed, since
7546 overflows in such cases are undefined, but not if it is unsigned, since
7547 those overflows are defined. So we only check for SIGN_EXTEND and
7548 not ZERO_EXTEND.
7549
7550 If we cannot find a biv, we return 0. */
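/* As a hypothetical sketch of the accepted patterns: for an insn whose
   source is (plus (reg i) (const_int 4)) with DEST_REG == (reg i), we
   return 1 with *MULT_VAL == const1_rtx, *INC_VAL == (const_int 4) and
   *LOCATION pointing at the constant; an assignment of a plain invariant
   to DEST_REG would instead yield *MULT_VAL == const0_rtx.  */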
7551
7552 static int
7553 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7554 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7555 rtx **location)
7556 {
7557 enum rtx_code code;
7558 rtx *argp, arg;
7559 rtx insn, set = 0, last, inc;
7560
7561 code = GET_CODE (x);
7562 *location = NULL;
7563 switch (code)
7564 {
7565 case PLUS:
7566 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7567 || (GET_CODE (XEXP (x, 0)) == SUBREG
7568 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7569 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7570 {
7571 argp = &XEXP (x, 1);
7572 }
7573 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7574 || (GET_CODE (XEXP (x, 1)) == SUBREG
7575 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7576 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7577 {
7578 argp = &XEXP (x, 0);
7579 }
7580 else
7581 return 0;
7582
7583 arg = *argp;
7584 if (loop_invariant_p (loop, arg) != 1)
7585 return 0;
7586
7587 /* convert_modes can emit new instructions, e.g. when arg is a loop
7588 invariant MEM and dest_reg has a different mode.
7589 These instructions would be emitted after the end of the function
7590 and then *inc_val would be an uninitialized pseudo.
7591 Detect this and bail in this case.
7592 Other alternatives to solve this can be introducing a convert_modes
7593 variant which is allowed to fail but not allowed to emit new
7594 instructions, emit these instructions before loop start and let
7595 it be garbage collected if *inc_val is never used or saving the
7596 *inc_val initialization sequence generated here and when *inc_val
7597 is going to be actually used, emit it at some suitable place. */
7598 last = get_last_insn ();
7599 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7600 if (get_last_insn () != last)
7601 {
7602 delete_insns_since (last);
7603 return 0;
7604 }
7605
7606 *inc_val = inc;
7607 *mult_val = const1_rtx;
7608 *location = argp;
7609 return 1;
7610
7611 case SUBREG:
7612 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
7613 handle addition of promoted variables.
7614 ??? The comment at the start of this function is wrong: promoted
7615 variable increments don't look like it says they do. */
7616 return basic_induction_var (loop, SUBREG_REG (x),
7617 GET_MODE (SUBREG_REG (x)),
7618 dest_reg, p, inc_val, mult_val, location);
7619
7620 case REG:
7621 /* If this register is assigned in a previous insn, look at its
7622 source, but don't go outside the loop or past a label. */
7623
7624 /* If this sets a register to itself, we would repeat any previous
7625 biv increment if we applied this strategy blindly. */
7626 if (rtx_equal_p (dest_reg, x))
7627 return 0;
7628
7629 insn = p;
7630 while (1)
7631 {
7632 rtx dest;
7633 do
7634 {
7635 insn = PREV_INSN (insn);
7636 }
7637 while (insn && NOTE_P (insn)
7638 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7639
7640 if (!insn)
7641 break;
7642 set = single_set (insn);
7643 if (set == 0)
7644 break;
7645 dest = SET_DEST (set);
7646 if (dest == x
7647 || (GET_CODE (dest) == SUBREG
7648 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7649 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7650 && SUBREG_REG (dest) == x))
7651 return basic_induction_var (loop, SET_SRC (set),
7652 (GET_MODE (SET_SRC (set)) == VOIDmode
7653 ? GET_MODE (x)
7654 : GET_MODE (SET_SRC (set))),
7655 dest_reg, insn,
7656 inc_val, mult_val, location);
7657
7658 while (GET_CODE (dest) == SUBREG
7659 || GET_CODE (dest) == ZERO_EXTRACT
7660 || GET_CODE (dest) == STRICT_LOW_PART)
7661 dest = XEXP (dest, 0);
7662 if (dest == x)
7663 break;
7664 }
7665 /* Fall through. */
7666
7667 /* Can accept constant setting of biv only when inside the innermost loop.
7668 Otherwise, a biv of an inner loop may be incorrectly recognized
7669 as a biv of the outer loop,
7670 causing code to be moved INTO the inner loop. */
7671 case MEM:
7672 if (loop_invariant_p (loop, x) != 1)
7673 return 0;
7674 case CONST_INT:
7675 case SYMBOL_REF:
7676 case CONST:
7677 /* convert_modes dies if we try to convert to or from CCmode, so just
7678 exclude that case. It is very unlikely that a condition code value
7679 would be a useful iterator anyway. convert_modes dies if we try to
7680 convert a float mode to non-float or vice versa too. */
7681 if (loop->level == 1
7682 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7683 && GET_MODE_CLASS (mode) != MODE_CC)
7684 {
7685 /* Possible bug here? Perhaps we don't know the mode of X. */
7686 last = get_last_insn ();
7687 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7688 if (get_last_insn () != last)
7689 {
7690 delete_insns_since (last);
7691 return 0;
7692 }
7693
7694 *inc_val = inc;
7695 *mult_val = const0_rtx;
7696 return 1;
7697 }
7698 else
7699 return 0;
7700
7701 case SIGN_EXTEND:
7702 /* Ignore this BIV if signed arithmetic overflow is defined. */
7703 if (flag_wrapv)
7704 return 0;
7705 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7706 dest_reg, p, inc_val, mult_val, location);
7707
7708 case ASHIFTRT:
7709 /* Similar, since this can be a sign extension. */
7710 for (insn = PREV_INSN (p);
7711 (insn && NOTE_P (insn)
7712 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7713 insn = PREV_INSN (insn))
7714 ;
7715
7716 if (insn)
7717 set = single_set (insn);
7718
7719 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7720 && set && SET_DEST (set) == XEXP (x, 0)
7721 && GET_CODE (XEXP (x, 1)) == CONST_INT
7722 && INTVAL (XEXP (x, 1)) >= 0
7723 && GET_CODE (SET_SRC (set)) == ASHIFT
7724 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7725 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7726 GET_MODE (XEXP (x, 0)),
7727 dest_reg, insn, inc_val, mult_val,
7728 location);
7729 return 0;
7730
7731 default:
7732 return 0;
7733 }
7734 }
7735 \f
7736 /* A general induction variable (giv) is any quantity that is a linear
7737 function of a basic induction variable,
7738 i.e. giv = biv * mult_val + add_val.
7739 The coefficients can be any loop invariant quantity.
7740 A giv need not be computed directly from the biv;
7741 it can be computed by way of other givs. */
7742
7743 /* Determine whether X computes a giv.
7744 If it does, return a nonzero value
7745 which is the benefit from eliminating the computation of X;
7746 set *SRC_REG to the register of the biv that it is computed from;
7747 set *ADD_VAL and *MULT_VAL to the coefficients,
7748 such that the value of X is biv * mult + add; */
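/* For example (hypothetical rtl): if X simplifies to
   (plus (mult (reg B) (const_int 3)) (const_int 7)) where B is a biv,
   we set *SRC_REG to B, *MULT_VAL to (const_int 3) and *ADD_VAL to
   (const_int 7), i.e. X == B * 3 + 7.  */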
7749
7750 static int
7751 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7752 rtx *add_val, rtx *mult_val, rtx *ext_val,
7753 int is_addr, int *pbenefit,
7754 enum machine_mode addr_mode)
7755 {
7756 struct loop_ivs *ivs = LOOP_IVS (loop);
7757 rtx orig_x = x;
7758
7759 /* If this is an invariant, forget it, it isn't a giv. */
7760 if (loop_invariant_p (loop, x) == 1)
7761 return 0;
7762
7763 *pbenefit = 0;
7764 *ext_val = NULL_RTX;
7765 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7766 if (x == 0)
7767 return 0;
7768
7769 switch (GET_CODE (x))
7770 {
7771 case USE:
7772 case CONST_INT:
7773 /* Since this is now an invariant and wasn't before, it must be a giv
7774 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7775 with. */
7776 *src_reg = ivs->list->biv->dest_reg;
7777 *mult_val = const0_rtx;
7778 *add_val = x;
7779 break;
7780
7781 case REG:
7782 /* This is equivalent to a BIV. */
7783 *src_reg = x;
7784 *mult_val = const1_rtx;
7785 *add_val = const0_rtx;
7786 break;
7787
7788 case PLUS:
7789 /* Either (plus (biv) (invar)) or
7790 (plus (mult (biv) (invar_1)) (invar_2)). */
7791 if (GET_CODE (XEXP (x, 0)) == MULT)
7792 {
7793 *src_reg = XEXP (XEXP (x, 0), 0);
7794 *mult_val = XEXP (XEXP (x, 0), 1);
7795 }
7796 else
7797 {
7798 *src_reg = XEXP (x, 0);
7799 *mult_val = const1_rtx;
7800 }
7801 *add_val = XEXP (x, 1);
7802 break;
7803
7804 case MULT:
7805 /* ADD_VAL is zero. */
7806 *src_reg = XEXP (x, 0);
7807 *mult_val = XEXP (x, 1);
7808 *add_val = const0_rtx;
7809 break;
7810
7811 default:
7812 gcc_unreachable ();
7813 }
7814
7815 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
7816 unless they are CONST_INT). */
7817 if (GET_CODE (*add_val) == USE)
7818 *add_val = XEXP (*add_val, 0);
7819 if (GET_CODE (*mult_val) == USE)
7820 *mult_val = XEXP (*mult_val, 0);
7821
7822 if (is_addr)
7823 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7824 else
7825 *pbenefit += rtx_cost (orig_x, SET);
7826
7827 /* Always return true if this is a giv so it will be detected as such,
7828 even if the benefit is zero or negative. This allows elimination
7829 of bivs that might otherwise not be eliminated. */
7830 return 1;
7831 }
7832 \f
7833 /* Given an expression, X, try to form it as a linear function of a biv.
7834 We will canonicalize it to be of the form
7835 (plus (mult (BIV) (invar_1))
7836 (invar_2))
7837 with possible degeneracies.
7838
7839 The invariant expressions must each be of a form that can be used as a
7840 machine operand. We surround them with a USE rtx (a hack, but localized
7841 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7842 routine; it is the caller's responsibility to strip them.
7843
7844 If no such canonicalization is possible (i.e., two bivs are used, or we
7845 encounter an expression that is neither invariant nor a biv or giv), this routine
7846 returns 0.
7847
7848 For a nonzero return, the result will have a code of CONST_INT, USE,
7849 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7850
7851 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
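/* As an illustrative (hypothetical) canonicalization: for a biv B,
   the expression (mult (minus (reg B) (const_int 1)) (const_int 4))
   is rewritten via the MINUS and MULT rules below into
   (plus (mult (reg B) (const_int 4)) (const_int -4)).  */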
7852
7853 static rtx sge_plus (enum machine_mode, rtx, rtx);
7854 static rtx sge_plus_constant (rtx, rtx);
7855
7856 static rtx
7857 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7858 {
7859 struct loop_ivs *ivs = LOOP_IVS (loop);
7860 struct loop_regs *regs = LOOP_REGS (loop);
7861 enum machine_mode mode = GET_MODE (x);
7862 rtx arg0, arg1;
7863 rtx tem;
7864
7865 /* If this is not an integer mode, or if we cannot do arithmetic in this
7866 mode, this can't be a giv. */
7867 if (mode != VOIDmode
7868 && (GET_MODE_CLASS (mode) != MODE_INT
7869 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7870 return NULL_RTX;
7871
7872 switch (GET_CODE (x))
7873 {
7874 case PLUS:
7875 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7876 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7877 if (arg0 == 0 || arg1 == 0)
7878 return NULL_RTX;
7879
7880 /* Put constant last, CONST_INT last if both constant. */
7881 if ((GET_CODE (arg0) == USE
7882 || GET_CODE (arg0) == CONST_INT)
7883 && ! ((GET_CODE (arg0) == USE
7884 && GET_CODE (arg1) == USE)
7885 || GET_CODE (arg1) == CONST_INT))
7886 tem = arg0, arg0 = arg1, arg1 = tem;
7887
7888 /* Handle addition of zero, then addition of an invariant. */
7889 if (arg1 == const0_rtx)
7890 return arg0;
7891 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7892 switch (GET_CODE (arg0))
7893 {
7894 case CONST_INT:
7895 case USE:
7896 /* Adding two invariants must result in an invariant, so enclose
7897 addition operation inside a USE and return it. */
7898 if (GET_CODE (arg0) == USE)
7899 arg0 = XEXP (arg0, 0);
7900 if (GET_CODE (arg1) == USE)
7901 arg1 = XEXP (arg1, 0);
7902
7903 if (GET_CODE (arg0) == CONST_INT)
7904 tem = arg0, arg0 = arg1, arg1 = tem;
7905 if (GET_CODE (arg1) == CONST_INT)
7906 tem = sge_plus_constant (arg0, arg1);
7907 else
7908 tem = sge_plus (mode, arg0, arg1);
7909
7910 if (GET_CODE (tem) != CONST_INT)
7911 tem = gen_rtx_USE (mode, tem);
7912 return tem;
7913
7914 case REG:
7915 case MULT:
7916 /* biv + invar or mult + invar. Return sum. */
7917 return gen_rtx_PLUS (mode, arg0, arg1);
7918
7919 case PLUS:
7920 /* (a + invar_1) + invar_2. Associate. */
7921 return
7922 simplify_giv_expr (loop,
7923 gen_rtx_PLUS (mode,
7924 XEXP (arg0, 0),
7925 gen_rtx_PLUS (mode,
7926 XEXP (arg0, 1),
7927 arg1)),
7928 ext_val, benefit);
7929
7930 default:
7931 gcc_unreachable ();
7932 }
7933
7934 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7935 MULT to reduce cases. */
7936 if (REG_P (arg0))
7937 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7938 if (REG_P (arg1))
7939 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7940
7941 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7942 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7943 Recurse to associate the second PLUS. */
7944 if (GET_CODE (arg1) == MULT)
7945 tem = arg0, arg0 = arg1, arg1 = tem;
7946
7947 if (GET_CODE (arg1) == PLUS)
7948 return
7949 simplify_giv_expr (loop,
7950 gen_rtx_PLUS (mode,
7951 gen_rtx_PLUS (mode, arg0,
7952 XEXP (arg1, 0)),
7953 XEXP (arg1, 1)),
7954 ext_val, benefit);
7955
7956 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7957 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7958 return NULL_RTX;
7959
7960 if (!rtx_equal_p (arg0, arg1))
7961 return NULL_RTX;
7962
7963 return simplify_giv_expr (loop,
7964 gen_rtx_MULT (mode,
7965 XEXP (arg0, 0),
7966 gen_rtx_PLUS (mode,
7967 XEXP (arg0, 1),
7968 XEXP (arg1, 1))),
7969 ext_val, benefit);
7970
7971 case MINUS:
7972 /* Handle "a - b" as "a + b * (-1)". */
7973 return simplify_giv_expr (loop,
7974 gen_rtx_PLUS (mode,
7975 XEXP (x, 0),
7976 gen_rtx_MULT (mode,
7977 XEXP (x, 1),
7978 constm1_rtx)),
7979 ext_val, benefit);
7980
7981 case MULT:
7982 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7983 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7984 if (arg0 == 0 || arg1 == 0)
7985 return NULL_RTX;
7986
7987 /* Put constant last, CONST_INT last if both constant. */
7988 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7989 && GET_CODE (arg1) != CONST_INT)
7990 tem = arg0, arg0 = arg1, arg1 = tem;
7991
7992 /* If second argument is not now constant, not giv. */
7993 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7994 return NULL_RTX;
7995
7996 /* Handle multiply by 0 or 1. */
7997 if (arg1 == const0_rtx)
7998 return const0_rtx;
7999
8000 else if (arg1 == const1_rtx)
8001 return arg0;
8002
8003 switch (GET_CODE (arg0))
8004 {
8005 case REG:
8006 /* biv * invar. Done. */
8007 return gen_rtx_MULT (mode, arg0, arg1);
8008
8009 case CONST_INT:
8010 /* Product of two constants. */
8011 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
8012
8013 case USE:
8014 /* invar * invar is a giv, but attempt to simplify it somehow. */
8015 if (GET_CODE (arg1) != CONST_INT)
8016 return NULL_RTX;
8017
8018 arg0 = XEXP (arg0, 0);
8019 if (GET_CODE (arg0) == MULT)
8020 {
8021 /* (invar_0 * invar_1) * invar_2. Associate. */
8022 return simplify_giv_expr (loop,
8023 gen_rtx_MULT (mode,
8024 XEXP (arg0, 0),
8025 gen_rtx_MULT (mode,
8026 XEXP (arg0,
8027 1),
8028 arg1)),
8029 ext_val, benefit);
8030 }
8031 /* Propagate the MULT expressions to the innermost nodes. */
8032 else if (GET_CODE (arg0) == PLUS)
8033 {
8034 /* (invar_0 + invar_1) * invar_2. Distribute. */
8035 return simplify_giv_expr (loop,
8036 gen_rtx_PLUS (mode,
8037 gen_rtx_MULT (mode,
8038 XEXP (arg0,
8039 0),
8040 arg1),
8041 gen_rtx_MULT (mode,
8042 XEXP (arg0,
8043 1),
8044 arg1)),
8045 ext_val, benefit);
8046 }
8047 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8048
8049 case MULT:
8050 /* (a * invar_1) * invar_2. Associate. */
8051 return simplify_giv_expr (loop,
8052 gen_rtx_MULT (mode,
8053 XEXP (arg0, 0),
8054 gen_rtx_MULT (mode,
8055 XEXP (arg0, 1),
8056 arg1)),
8057 ext_val, benefit);
8058
8059 case PLUS:
8060 /* (a + invar_1) * invar_2. Distribute. */
8061 return simplify_giv_expr (loop,
8062 gen_rtx_PLUS (mode,
8063 gen_rtx_MULT (mode,
8064 XEXP (arg0, 0),
8065 arg1),
8066 gen_rtx_MULT (mode,
8067 XEXP (arg0, 1),
8068 arg1)),
8069 ext_val, benefit);
8070
8071 default:
8072 gcc_unreachable ();
8073 }
8074
8075 case ASHIFT:
8076 /* Shift by constant is multiply by power of two. */
8077 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8078 return 0;
8079
8080 return
8081 simplify_giv_expr (loop,
8082 gen_rtx_MULT (mode,
8083 XEXP (x, 0),
8084 GEN_INT ((HOST_WIDE_INT) 1
8085 << INTVAL (XEXP (x, 1)))),
8086 ext_val, benefit);
8087
8088 case NEG:
8089 /* "-a" is "a * (-1)" */
8090 return simplify_giv_expr (loop,
8091 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8092 ext_val, benefit);
8093
8094 case NOT:
8095 /* "~a" is "-a - 1". Silly, but easy. */
8096 return simplify_giv_expr (loop,
8097 gen_rtx_MINUS (mode,
8098 gen_rtx_NEG (mode, XEXP (x, 0)),
8099 const1_rtx),
8100 ext_val, benefit);
8101
8102 case USE:
8103 /* Already in proper form for invariant. */
8104 return x;
8105
8106 case SIGN_EXTEND:
8107 case ZERO_EXTEND:
8108 case TRUNCATE:
8109 /* Conditionally recognize extensions of simple IVs. After we've
8110 computed loop traversal counts and verified the range of the
8111 source IV, we'll reevaluate this as a GIV. */
8112 if (*ext_val == NULL_RTX)
8113 {
8114 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8115 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8116 {
8117 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8118 return arg0;
8119 }
8120 }
8121 goto do_default;
8122
8123 case REG:
8124 /* If this is a new register, we can't deal with it. */
8125 if (REGNO (x) >= max_reg_before_loop)
8126 return 0;
8127
8128 /* Check for biv or giv. */
8129 switch (REG_IV_TYPE (ivs, REGNO (x)))
8130 {
8131 case BASIC_INDUCT:
8132 return x;
8133 case GENERAL_INDUCT:
8134 {
8135 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8136
8137 /* Form expression from giv and add benefit. Ensure this giv
8138 can derive another and subtract any needed adjustment if so. */
8139
8140 /* Increasing the benefit here is risky. The only case in which it
8141 is arguably correct is if this is the only use of V. In other
8142 cases, this will artificially inflate the benefit of the current
8143 giv, and lead to suboptimal code. Thus, it is disabled, since
8144 potentially not reducing an only marginally beneficial giv is
8145 less harmful than reducing many givs that are not really
8146 beneficial. */
8147 {
8148 rtx single_use = regs->array[REGNO (x)].single_usage;
8149 if (single_use && single_use != const0_rtx)
8150 *benefit += v->benefit;
8151 }
8152
8153 if (v->cant_derive)
8154 return 0;
8155
8156 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8157 v->src_reg, v->mult_val),
8158 v->add_val);
8159
8160 if (v->derive_adjustment)
8161 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8162 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8163 if (*ext_val)
8164 {
8165 if (!v->ext_dependent)
8166 return arg0;
8167 }
8168 else
8169 {
8170 *ext_val = v->ext_dependent;
8171 return arg0;
8172 }
8173 return 0;
8174 }
8175
8176 default:
8177 do_default:
8178 /* If it isn't an induction variable, and it is invariant, we
8179 may be able to simplify things further by looking through
8180 the bits we just moved outside the loop. */
8181 if (loop_invariant_p (loop, x) == 1)
8182 {
8183 struct movable *m;
8184 struct loop_movables *movables = LOOP_MOVABLES (loop);
8185
8186 for (m = movables->head; m; m = m->next)
8187 if (rtx_equal_p (x, m->set_dest))
8188 {
8189 /* Ok, we found a match. Substitute and simplify. */
8190
8191 /* If we match another movable, we must use that, as
8192 this one is going away. */
8193 if (m->match)
8194 return simplify_giv_expr (loop, m->match->set_dest,
8195 ext_val, benefit);
8196
8197 /* If consec is nonzero, this is a member of a group of
8198 instructions that were moved together. We handle this
8199 case only to the point of seeking to the last insn and
8200 looking for a REG_EQUAL. Fail if we don't find one. */
8201 if (m->consec != 0)
8202 {
8203 int i = m->consec;
8204 tem = m->insn;
8205 do
8206 {
8207 tem = NEXT_INSN (tem);
8208 }
8209 while (--i > 0);
8210
8211 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8212 if (tem)
8213 tem = XEXP (tem, 0);
8214 }
8215 else
8216 {
8217 tem = single_set (m->insn);
8218 if (tem)
8219 tem = SET_SRC (tem);
8220 }
8221
8222 if (tem)
8223 {
8224 /* What we are most interested in is pointer
8225 arithmetic on invariants -- only take
8226 patterns we may be able to do something with. */
8227 if (GET_CODE (tem) == PLUS
8228 || GET_CODE (tem) == MULT
8229 || GET_CODE (tem) == ASHIFT
8230 || GET_CODE (tem) == CONST_INT
8231 || GET_CODE (tem) == SYMBOL_REF)
8232 {
8233 tem = simplify_giv_expr (loop, tem, ext_val,
8234 benefit);
8235 if (tem)
8236 return tem;
8237 }
8238 else if (GET_CODE (tem) == CONST
8239 && GET_CODE (XEXP (tem, 0)) == PLUS
8240 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8241 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8242 {
8243 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8244 ext_val, benefit);
8245 if (tem)
8246 return tem;
8247 }
8248 }
8249 break;
8250 }
8251 }
8252 break;
8253 }
8254
8255 /* Fall through to general case. */
8256 default:
8257 /* If invariant, return as USE (unless CONST_INT).
8258 Otherwise, not giv. */
8259 if (GET_CODE (x) == USE)
8260 x = XEXP (x, 0);
8261
8262 if (loop_invariant_p (loop, x) == 1)
8263 {
8264 if (GET_CODE (x) == CONST_INT)
8265 return x;
8266 if (GET_CODE (x) == CONST
8267 && GET_CODE (XEXP (x, 0)) == PLUS
8268 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8269 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8270 x = XEXP (x, 0);
8271 return gen_rtx_USE (mode, x);
8272 }
8273 else
8274 return 0;
8275 }
8276 }
8277
8278 /* This routine folds invariants such that there is only ever one
8279 CONST_INT in the summation. It is only used by simplify_giv_expr. */
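/* For example (hypothetical operands):
   sge_plus_constant ((plus (reg A) (const_int 3)), (const_int 4))
   folds the two constants and yields (plus (reg A) (const_int 7)).  */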
8280
8281 static rtx
8282 sge_plus_constant (rtx x, rtx c)
8283 {
8284 if (GET_CODE (x) == CONST_INT)
8285 return GEN_INT (INTVAL (x) + INTVAL (c));
8286 else if (GET_CODE (x) != PLUS)
8287 return gen_rtx_PLUS (GET_MODE (x), x, c);
8288 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8289 {
8290 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8291 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8292 }
8293 else if (GET_CODE (XEXP (x, 0)) == PLUS
8294 || GET_CODE (XEXP (x, 1)) != PLUS)
8295 {
8296 return gen_rtx_PLUS (GET_MODE (x),
8297 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8298 }
8299 else
8300 {
8301 return gen_rtx_PLUS (GET_MODE (x),
8302 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8303 }
8304 }
8305
8306 static rtx
8307 sge_plus (enum machine_mode mode, rtx x, rtx y)
8308 {
8309 while (GET_CODE (y) == PLUS)
8310 {
8311 rtx a = XEXP (y, 0);
8312 if (GET_CODE (a) == CONST_INT)
8313 x = sge_plus_constant (x, a);
8314 else
8315 x = gen_rtx_PLUS (mode, x, a);
8316 y = XEXP (y, 1);
8317 }
8318 if (GET_CODE (y) == CONST_INT)
8319 x = sge_plus_constant (x, y);
8320 else
8321 x = gen_rtx_PLUS (mode, x, y);
8322 return x;
8323 }
8324 \f
8325 /* Help detect a giv that is calculated by several consecutive insns;
8326 for example,
8327 giv = biv * M
8328 giv = giv + A
8329 The caller has already identified the first insn P as having a giv as dest;
8330 we check that all other insns that set the same register follow
8331 immediately after P, that they alter nothing else,
8332 and that the result of the last is still a giv.
8333
8334 The value is 0 if the reg set in P is not really a giv.
8335 Otherwise, the value is the amount gained by eliminating
8336 all the consecutive insns that compute the value.
8337
8338 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8339 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8340
8341 The coefficients of the ultimate giv value are stored in
8342 *MULT_VAL and *ADD_VAL. */
8343
8344 static int
8345 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8346 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8347 rtx *ext_val, rtx *last_consec_insn)
8348 {
8349 struct loop_ivs *ivs = LOOP_IVS (loop);
8350 struct loop_regs *regs = LOOP_REGS (loop);
8351 int count;
8352 enum rtx_code code;
8353 int benefit;
8354 rtx temp;
8355 rtx set;
8356
8357 /* Indicate that this is a giv so that we can update the value produced in
8358 each insn of the multi-insn sequence.
8359
8360 This induction structure will be used only by the call to
8361 general_induction_var below, so we can allocate it on our stack.
8362 If this is a giv, our caller will replace the induct var entry with
8363 a new induction structure. */
8364 struct induction *v;
8365
8366 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8367 return 0;
8368
8369 v = alloca (sizeof (struct induction));
8370 v->src_reg = src_reg;
8371 v->mult_val = *mult_val;
8372 v->add_val = *add_val;
8373 v->benefit = first_benefit;
8374 v->cant_derive = 0;
8375 v->derive_adjustment = 0;
8376 v->ext_dependent = NULL_RTX;
8377
8378 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8379 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8380
8381 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8382
8383 while (count > 0)
8384 {
8385 p = NEXT_INSN (p);
8386 code = GET_CODE (p);
8387
8388 /* If libcall, skip to end of call sequence. */
8389 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8390 p = XEXP (temp, 0);
8391
8392 if (code == INSN
8393 && (set = single_set (p))
8394 && REG_P (SET_DEST (set))
8395 && SET_DEST (set) == dest_reg
8396 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8397 add_val, mult_val, ext_val, 0,
8398 &benefit, VOIDmode)
8399 /* Giv created by equivalent expression. */
8400 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8401 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8402 add_val, mult_val, ext_val, 0,
8403 &benefit, VOIDmode)))
8404 && src_reg == v->src_reg)
8405 {
8406 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8407 benefit += libcall_benefit (p);
8408
8409 count--;
8410 v->mult_val = *mult_val;
8411 v->add_val = *add_val;
8412 v->benefit += benefit;
8413 }
8414 else if (code != NOTE)
8415 {
8416 /* Allow insns that set something other than this giv to a
8417 constant. Such insns are needed on machines which cannot
8418 include long constants and should not disqualify a giv. */
8419 if (code == INSN
8420 && (set = single_set (p))
8421 && SET_DEST (set) != dest_reg
8422 && CONSTANT_P (SET_SRC (set)))
8423 continue;
8424
8425 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8426 return 0;
8427 }
8428 }
8429
8430 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8431 *last_consec_insn = p;
8432 return v->benefit;
8433 }
8434 \f
8435 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8436 represented by G1. If no such expression can be found, or it is clear that
8437 it cannot possibly be a valid address, 0 is returned.
8438
8439 To perform the computation, we note that
8440 G1 = x * v + a and
8441 G2 = y * v + b
8442 where `v' is the biv.
8443
8444 So G2 = (y/x) * G1 + (b - a*y/x).
8445
8446 Note that MULT = y/x.
8447
8448 Update: A and B are now allowed to be additive expressions such that
8449 B contains all variables in A. That is, computing B-A will not require
8450 subtracting variables. */
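/* A worked example with hypothetical values: if G1 = 2*v + 1 and
   G2 = 6*v + 7, then MULT = y/x = 3 and b - a*y/x = 7 - 1*3 = 4,
   so G2 can be rebuilt as 3 * G1 + 4.  */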
8451
8452 static rtx
8453 express_from_1 (rtx a, rtx b, rtx mult)
8454 {
8455 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8456
8457 if (mult == const0_rtx)
8458 return b;
8459
8460 /* If MULT is not 1, we cannot handle A with non-constants, since we
8461 would then be required to subtract multiples of the registers in A.
8462 This is theoretically possible, and may even apply to some Fortran
8463 constructs, but it is a lot of work and we do not attempt it here. */
8464
8465 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8466 return NULL_RTX;
8467
8468 /* In general these structures are sorted top to bottom (down the PLUS
8469 chain), but not left to right across the PLUS. If B is a higher
8470 order giv than A, we can strip one level and recurse. If A is higher
8471 order, we'll eventually bail out, but won't know that until the end.
8472 If they are the same, we'll strip one level around this loop. */
8473
8474 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8475 {
8476 rtx ra, rb, oa, ob, tmp;
8477
8478 ra = XEXP (a, 0), oa = XEXP (a, 1);
8479 if (GET_CODE (ra) == PLUS)
8480 tmp = ra, ra = oa, oa = tmp;
8481
8482 rb = XEXP (b, 0), ob = XEXP (b, 1);
8483 if (GET_CODE (rb) == PLUS)
8484 tmp = rb, rb = ob, ob = tmp;
8485
8486 if (rtx_equal_p (ra, rb))
8487 /* We matched: remove one reg completely. */
8488 a = oa, b = ob;
8489 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8490 /* An alternate match. */
8491 a = oa, b = rb;
8492 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8493 /* An alternate match. */
8494 a = ra, b = ob;
8495 else
8496 {
8497 /* Indicates an extra register in B. Strip one level from B and
8498 recurse, hoping B was the higher order expression. */
8499 ob = express_from_1 (a, ob, mult);
8500 if (ob == NULL_RTX)
8501 return NULL_RTX;
8502 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8503 }
8504 }
8505
8506 /* Here we are at the last level of A, go through the cases hoping to
8507 get rid of everything but a constant. */
8508
8509 if (GET_CODE (a) == PLUS)
8510 {
8511 rtx ra, oa;
8512
8513 ra = XEXP (a, 0), oa = XEXP (a, 1);
8514 if (rtx_equal_p (oa, b))
8515 oa = ra;
8516 else if (!rtx_equal_p (ra, b))
8517 return NULL_RTX;
8518
8519 if (GET_CODE (oa) != CONST_INT)
8520 return NULL_RTX;
8521
8522 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8523 }
8524 else if (GET_CODE (a) == CONST_INT)
8525 {
8526 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8527 }
8528 else if (CONSTANT_P (a))
8529 {
8530 enum machine_mode mode_a = GET_MODE (a);
8531 enum machine_mode mode_b = GET_MODE (b);
8532 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8533 return simplify_gen_binary (MINUS, mode, b, a);
8534 }
8535 else if (GET_CODE (b) == PLUS)
8536 {
8537 if (rtx_equal_p (a, XEXP (b, 0)))
8538 return XEXP (b, 1);
8539 else if (rtx_equal_p (a, XEXP (b, 1)))
8540 return XEXP (b, 0);
8541 else
8542 return NULL_RTX;
8543 }
8544 else if (rtx_equal_p (a, b))
8545 return const0_rtx;
8546
8547 return NULL_RTX;
8548 }
8549
8550 static rtx
8551 express_from (struct induction *g1, struct induction *g2)
8552 {
8553 rtx mult, add;
8554
8555 /* The value that G1 will be multiplied by must be a constant integer. Also,
8556 the only chance we have of getting a valid address is if y/x (see above
8557 for notation) is also an integer. */
8558 if (GET_CODE (g1->mult_val) == CONST_INT
8559 && GET_CODE (g2->mult_val) == CONST_INT)
8560 {
8561 if (g1->mult_val == const0_rtx
8562 || (g1->mult_val == constm1_rtx
8563 && INTVAL (g2->mult_val)
8564 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8565 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8566 return NULL_RTX;
8567 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8568 }
8569 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8570 mult = const1_rtx;
8571 else
8572 {
8573 /* ??? Find out if the one is a multiple of the other? */
8574 return NULL_RTX;
8575 }
8576
8577 add = express_from_1 (g1->add_val, g2->add_val, mult);
8578 if (add == NULL_RTX)
8579 {
8580 /* Failed. If we've got a multiplication factor between G1 and G2,
8581 scale G1's addend and try again. */
8582 if (INTVAL (mult) > 1)
8583 {
8584 rtx g1_add_val = g1->add_val;
8585 if (GET_CODE (g1_add_val) == MULT
8586 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8587 {
8588 HOST_WIDE_INT m;
8589 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8590 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8591 XEXP (g1_add_val, 0), GEN_INT (m));
8592 }
8593 else
8594 {
8595 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8596 mult);
8597 }
8598
8599 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8600 }
8601 }
8602 if (add == NULL_RTX)
8603 return NULL_RTX;
8604
8605 /* Form simplified final result. */
8606 if (mult == const0_rtx)
8607 return add;
8608 else if (mult == const1_rtx)
8609 mult = g1->dest_reg;
8610 else
8611 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8612
8613 if (add == const0_rtx)
8614 return mult;
8615 else
8616 {
8617 if (GET_CODE (add) == PLUS
8618 && CONSTANT_P (XEXP (add, 1)))
8619 {
8620 rtx tem = XEXP (add, 1);
8621 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8622 add = tem;
8623 }
8624
8625 return gen_rtx_PLUS (g2->mode, mult, add);
8626 }
8627 }
8628 \f
8629 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8630 represented by G1. This indicates that G2 should be combined with G1 and
8631 that G2 can use (either directly or via an address expression) a register
8632 used to represent G1. */
8633
8634 static rtx
8635 combine_givs_p (struct induction *g1, struct induction *g2)
8636 {
8637 rtx comb, ret;
8638
8639 /* With the introduction of ext dependent givs, we must be careful about modes.
8640 G2 must not use a wider mode than G1. */
8641 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8642 return NULL_RTX;
8643
8644 ret = comb = express_from (g1, g2);
8645 if (comb == NULL_RTX)
8646 return NULL_RTX;
8647 if (g1->mode != g2->mode)
8648 ret = gen_lowpart (g2->mode, comb);
8649
8650 /* If these givs are identical, they can be combined. We use the results
8651 of express_from because the addends are not in a canonical form, so
8652 rtx_equal_p is a weaker test. */
8653 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8654 combination to be the other way round. */
8655 if (comb == g1->dest_reg
8656 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8657 {
8658 return ret;
8659 }
8660
8661 /* If G2 can be expressed as a function of G1 and that function is valid
8662 as an address and no more expensive than using a register for G2,
8663 the expression of G2 in terms of G1 can be used. */
8664 if (ret != NULL_RTX
8665 && g2->giv_type == DEST_ADDR
8666 && memory_address_p (GET_MODE (g2->mem), ret))
8667 return ret;
8668
8669 return NULL_RTX;
8670 }
8671 \f
8672 /* See if BL is monotonic and has a constant per-iteration increment.
8673 Return the increment if so, otherwise return 0. */
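/* For instance (hypothetical biv): a biv updated by `i += 2' at two
   places in the loop has a constant total increment of 4, and each
   individual add_val (2) agrees with its sign, so 4 is returned.  If
   one of the updates were `i -= 1' instead, 0 would be returned.  */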
8674
8675 static HOST_WIDE_INT
8676 get_monotonic_increment (struct iv_class *bl)
8677 {
8678 struct induction *v;
8679 rtx incr;
8680
8681 /* Get the total increment and check that it is constant. */
8682 incr = biv_total_increment (bl);
8683 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8684 return 0;
8685
8686 for (v = bl->biv; v != 0; v = v->next_iv)
8687 {
8688 if (GET_CODE (v->add_val) != CONST_INT)
8689 return 0;
8690
8691 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8692 return 0;
8693
8694 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8695 return 0;
8696 }
8697 return INTVAL (incr);
8698 }
8699
8700
8701 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8702 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8703 to which the biv belongs and INCR is its per-iteration increment. */
8704
8705 static bool
8706 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8707 HOST_WIDE_INT incr, enum machine_mode mode,
8708 unsigned HOST_WIDE_INT bias)
8709 {
8710 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8711
8712 /* We need to be able to manipulate MODE-size constants. */
8713 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8714 return false;
8715
8716 /* The number of loop iterations must be constant. */
8717 if (LOOP_INFO (loop)->n_iterations == 0)
8718 return false;
8719
8720 /* So must the biv's initial value. */
8721 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8722 return false;
8723
8724 initial = bias + INTVAL (bl->initial_value);
8725 maximum = GET_MODE_MASK (mode);
8726
8727 /* Make sure that the initial value is within range. */
8728 if (initial > maximum)
8729 return false;
8730
8731 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8732 (calculated to arbitrary precision) must be <= SPAN. */
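  /* Worked example (editor's illustration): in QImode with INITIAL == 250,
     INCR == 2 and 3 iterations, MAXIMUM is 255, so DELTA == 2 and
     SPAN == 256 - 250 == 6; 6 / 3 >= 2 holds and the biased biv stays
     within the mode.  With 4 iterations the test correctly fails.  */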
8733 if (incr < 0)
8734 {
8735 delta = -incr;
8736 span = initial;
8737 }
8738 else
8739 {
8740 delta = incr;
8741 /* Handle the special case in which MAXIMUM is the largest
8742 unsigned HOST_WIDE_INT and INITIAL is 0. */
8743 if (maximum + 1 == initial)
8744 span = LOOP_INFO (loop)->n_iterations * delta;
8745 else
8746 span = maximum + 1 - initial;
8747 }
8748 return (span / LOOP_INFO (loop)->n_iterations >= delta);
8749 }
8750
8751
8752 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8753 the loop to which BL belongs and INCR is its per-iteration increment.
8754 UNSIGNEDP is true if the biv should be treated as unsigned. */
8755
8756 static bool
8757 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8758 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8759 {
8760 struct loop_info *loop_info;
8761 unsigned HOST_WIDE_INT bias;
8762
8763 /* A biv's value will always be limited to its natural mode.
8764 Larger modes will observe the same wrap-around. */
8765 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8766 mode = GET_MODE (bl->biv->src_reg);
8767
8768 loop_info = LOOP_INFO (loop);
8769
8770 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8771 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8772 return true;
8773
8774 if (mode == GET_MODE (bl->biv->src_reg)
8775 && bl->biv->src_reg == loop_info->iteration_var
8776 && loop_info->comparison_value
8777 && loop_invariant_p (loop, loop_info->comparison_value))
8778 {
8779 /* If the increment is +1, and the exit test is a <, the BIV
8780 cannot overflow. (For <=, we have the problematic case that
8781 the comparison value might be the maximum value of the range.) */
8782 if (incr == 1)
8783 {
8784 if (loop_info->comparison_code == LT)
8785 return true;
8786 if (loop_info->comparison_code == LTU && unsignedp)
8787 return true;
8788 }
8789
8790 /* Likewise for increment -1 and exit test >. */
8791 if (incr == -1)
8792 {
8793 if (loop_info->comparison_code == GT)
8794 return true;
8795 if (loop_info->comparison_code == GTU && unsignedp)
8796 return true;
8797 }
8798 }
8799 return false;
8800 }
8801
8802
8803 /* Return false iff it is provable that biv BL plus BIAS will not wrap
8804 at any point in its update sequence. Note that at the rtl level we
8805 may not have information about the signedness of BL; in that case,
8806 check for both signed and unsigned overflow. */
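/* Editor's note (illustrative): the signed check below reuses the unsigned
   machinery by adding a bias of (GET_MODE_MASK (mode) >> 1) + 1 -- for
   SImode that is 0x80000000 -- which shifts the signed range of the mode
   onto the unsigned range, so a signed overflow shows up as an unsigned
   wrap.  */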
8807
8808 static bool
8809 biased_biv_may_wrap_p (const struct loop *loop, struct iv_class *bl,
8810 unsigned HOST_WIDE_INT bias)
8811 {
8812 HOST_WIDE_INT incr;
8813 bool check_signed, check_unsigned;
8814 enum machine_mode mode;
8815
8816 /* If the increment is not monotonic, we'd have to check separately
8817 at each increment step. Not Worth It. */
8818 incr = get_monotonic_increment (bl);
8819 if (incr == 0)
8820 return true;
8821
8822 /* If this biv is the loop iteration variable, then we may be able to
8823 deduce a sign based on the loop condition. */
8824 /* ??? This is not 100% reliable; consider an unsigned biv that is cast
8825 to signed for the comparison. However, this same bug appears all
8826 through loop.c. */
8827 check_signed = check_unsigned = true;
8828 if (bl->biv->src_reg == LOOP_INFO (loop)->iteration_var)
8829 {
8830 switch (LOOP_INFO (loop)->comparison_code)
8831 {
8832 case GTU: case GEU: case LTU: case LEU:
8833 check_signed = false;
8834 break;
8835 case GT: case GE: case LT: case LE:
8836 check_unsigned = false;
8837 break;
8838 default:
8839 break;
8840 }
8841 }
8842
8843 mode = GET_MODE (bl->biv->src_reg);
8844
8845 if (check_unsigned
8846 && !biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8847 return true;
8848
8849 if (check_signed)
8850 {
8851 bias += (GET_MODE_MASK (mode) >> 1) + 1;
8852 if (!biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8853 return true;
8854 }
8855
8856 return false;
8857 }
8858
8859
8860 /* Given that X is an extension or truncation of BL, return true
8861 if it is unaffected by overflow. LOOP is the loop to which
8862 BL belongs and INCR is its per-iteration increment. */
8863
8864 static bool
8865 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8866 HOST_WIDE_INT incr, rtx x)
8867 {
8868 enum machine_mode mode;
8869 bool signedp, unsignedp;
8870
8871 switch (GET_CODE (x))
8872 {
8873 case SIGN_EXTEND:
8874 case ZERO_EXTEND:
8875 mode = GET_MODE (XEXP (x, 0));
8876 signedp = (GET_CODE (x) == SIGN_EXTEND);
8877 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8878 break;
8879
8880 case TRUNCATE:
8881 /* We don't know whether this value is being used as signed
8882 or unsigned, so check the conditions for both. */
8883 mode = GET_MODE (x);
8884 signedp = unsignedp = true;
8885 break;
8886
8887 default:
8888 gcc_unreachable ();
8889 }
8890
8891 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8892 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8893 }
8894
8895
8896 /* Check each extension dependent giv in this class to see if its
8897 root biv is safe from wrapping in the interior mode, which would
8898 make the giv illegal. */
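/* Illustrative example (editor's note): for a giv formed by zero-extending
   a SImode biv computation to DImode, the giv is a true linear function in
   DImode only if the SImode value never wraps during the loop; if a wrap
   cannot be ruled out, the giv is invalidated below.  */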
8899
8900 static void
8901 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8902 {
8903 struct induction *v;
8904 HOST_WIDE_INT incr;
8905
8906 incr = get_monotonic_increment (bl);
8907
8908 /* Invalidate givs that fail the tests. */
8909 for (v = bl->giv; v; v = v->next_iv)
8910 if (v->ext_dependent)
8911 {
8912 if (incr != 0
8913 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8914 {
8915 if (loop_dump_stream)
8916 fprintf (loop_dump_stream,
8917 "Verified ext dependent giv at %d of reg %d\n",
8918 INSN_UID (v->insn), bl->regno);
8919 }
8920 else
8921 {
8922 if (loop_dump_stream)
8923 fprintf (loop_dump_stream,
8924 "Failed ext dependent giv at %d\n",
8925 INSN_UID (v->insn));
8926
8927 v->ignore = 1;
8928 bl->all_reduced = 0;
8929 }
8930 }
8931 }
8932
8933 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8934
8935 static rtx
8936 extend_value_for_giv (struct induction *v, rtx value)
8937 {
8938 rtx ext_dep = v->ext_dependent;
8939
8940 if (! ext_dep)
8941 return value;
8942
8943 /* Recall that check_ext_dependent_givs verified that the known bounds
8944 of a biv did not overflow or wrap with respect to the extension for
8945 the giv. Therefore, constants need no additional adjustment. */
8946 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8947 return value;
8948
8949 /* Otherwise, we must adjust the value to compensate for the
8950 differing modes of the biv and the giv. */
8951 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8952 }
8953 \f
8954 struct combine_givs_stats
8955 {
8956 int giv_number;
8957 int total_benefit;
8958 };
8959
8960 static int
8961 cmp_combine_givs_stats (const void *xp, const void *yp)
8962 {
8963 const struct combine_givs_stats * const x =
8964 (const struct combine_givs_stats *) xp;
8965 const struct combine_givs_stats * const y =
8966 (const struct combine_givs_stats *) yp;
8967 int d;
8968 d = y->total_benefit - x->total_benefit;
8969 /* Stabilize the sort. */
8970 if (!d)
8971 d = x->giv_number - y->giv_number;
8972 return d;
8973 }
8974
8975 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8976 any other. If so, point SAME to the giv combined with and set NEW_REG to
8977 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8978 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
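/* Illustrative walk-through (editor's note): when giv G2 can be rewritten in
   terms of giv G1, G2->same is set to G1, G2->new_reg holds the expression
   in G1's register, G1->combined_with is bumped and G1 absorbs G2's benefit;
   the stats are then re-sorted (the `restart' label below) so the updated
   benefits drive the next round of combinations.  */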
8979
8980 static void
8981 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8982 {
8983 /* Additional benefit to add for being combined multiple times. */
8984 const int extra_benefit = 3;
8985
8986 struct induction *g1, *g2, **giv_array;
8987 int i, j, k, giv_count;
8988 struct combine_givs_stats *stats;
8989 rtx *can_combine;
8990
8991 /* Count givs, because bl->giv_count is incorrect here. */
8992 giv_count = 0;
8993 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8994 if (!g1->ignore)
8995 giv_count++;
8996
8997 giv_array = alloca (giv_count * sizeof (struct induction *));
8998 i = 0;
8999 for (g1 = bl->giv; g1; g1 = g1->next_iv)
9000 if (!g1->ignore)
9001 giv_array[i++] = g1;
9002
9003 stats = xcalloc (giv_count, sizeof (*stats));
9004 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
9005
9006 for (i = 0; i < giv_count; i++)
9007 {
9008 int this_benefit;
9009 rtx single_use;
9010
9011 g1 = giv_array[i];
9012 stats[i].giv_number = i;
9013
9014 /* If a DEST_REG GIV is used only once, do not allow it to combine
9015 with anything, for in doing so we will gain nothing that cannot
9016 be had by simply letting the GIV with which we would have combined
9017 be reduced on its own.  The lossage shows up in particular with
9018 DEST_ADDR targets on hosts with reg+reg addressing, though it can
9019 be seen elsewhere as well.  */
9020 if (g1->giv_type == DEST_REG
9021 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
9022 && single_use != const0_rtx)
9023 continue;
9024
9025 this_benefit = g1->benefit;
9026 /* Add an additional weight for zero addends. */
9027 if (g1->no_const_addval)
9028 this_benefit += 1;
9029
9030 for (j = 0; j < giv_count; j++)
9031 {
9032 rtx this_combine;
9033
9034 g2 = giv_array[j];
9035 if (g1 != g2
9036 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
9037 {
9038 can_combine[i * giv_count + j] = this_combine;
9039 this_benefit += g2->benefit + extra_benefit;
9040 }
9041 }
9042 stats[i].total_benefit = this_benefit;
9043 }
9044
9045 /* Iterate, combining until we can't. */
9046 restart:
9047 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
9048
9049 if (loop_dump_stream)
9050 {
9051 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
9052 for (k = 0; k < giv_count; k++)
9053 {
9054 g1 = giv_array[stats[k].giv_number];
9055 if (!g1->combined_with && !g1->same)
9056 fprintf (loop_dump_stream, " {%d, %d}",
9057 INSN_UID (giv_array[stats[k].giv_number]->insn),
9058 stats[k].total_benefit);
9059 }
9060 putc ('\n', loop_dump_stream);
9061 }
9062
9063 for (k = 0; k < giv_count; k++)
9064 {
9065 int g1_add_benefit = 0;
9066
9067 i = stats[k].giv_number;
9068 g1 = giv_array[i];
9069
9070 /* If it has already been combined, skip. */
9071 if (g1->combined_with || g1->same)
9072 continue;
9073
9074 for (j = 0; j < giv_count; j++)
9075 {
9076 g2 = giv_array[j];
9077 if (g1 != g2 && can_combine[i * giv_count + j]
9078 /* If it has already been combined, skip. */
9079 && ! g2->same && ! g2->combined_with)
9080 {
9081 int l;
9082
9083 g2->new_reg = can_combine[i * giv_count + j];
9084 g2->same = g1;
9085 /* For a DEST_ADDR giv, the address may now be an expression
9086 rather than a plain register.  This changes the costs considerably,
9087 so add the compensation.  */
9088 if (g2->giv_type == DEST_ADDR)
9089 g2->benefit = (g2->benefit + reg_address_cost
9090 - address_cost (g2->new_reg,
9091 GET_MODE (g2->mem)));
9092 g1->combined_with++;
9093 g1->lifetime += g2->lifetime;
9094
9095 g1_add_benefit += g2->benefit;
9096
9097 /* ??? The new final_[bg]iv_value code does a much better job
9098 of finding replaceable giv's, and hence this code may no
9099 longer be necessary. */
9100 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9101 g1_add_benefit -= copy_cost;
9102
9103 /* To help optimize the next set of combinations, remove
9104 this giv from the benefits of other potential mates. */
9105 for (l = 0; l < giv_count; ++l)
9106 {
9107 int m = stats[l].giv_number;
9108 if (can_combine[m * giv_count + j])
9109 stats[l].total_benefit -= g2->benefit + extra_benefit;
9110 }
9111
9112 if (loop_dump_stream)
9113 fprintf (loop_dump_stream,
9114 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9115 INSN_UID (g2->insn), INSN_UID (g1->insn),
9116 g1->benefit, g1_add_benefit, g1->lifetime);
9117 }
9118 }
9119
9120 /* To help optimize the next set of combinations, remove
9121 this giv from the benefits of other potential mates. */
9122 if (g1->combined_with)
9123 {
9124 for (j = 0; j < giv_count; ++j)
9125 {
9126 int m = stats[j].giv_number;
9127 if (can_combine[m * giv_count + i])
9128 stats[j].total_benefit -= g1->benefit + extra_benefit;
9129 }
9130
9131 g1->benefit += g1_add_benefit;
9132
9133 /* We've finished with this giv, and everything it touched.
9134 Restart the combination so that proper weights for the
9135 rest of the givs are properly taken into account. */
9136 /* ??? Ideally we would compact the arrays at this point, so
9137 as to not cover old ground. But sanely compacting
9138 can_combine is tricky. */
9139 goto restart;
9140 }
9141 }
9142
9143 /* Clean up. */
9144 free (stats);
9145 free (can_combine);
9146 }
9147 \f
9148 /* Generate sequence for REG = B * M + A. B is the initial value of
9149 the basic induction variable, M a multiplicative constant, A an
9150 additive constant and REG the destination register. */
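/* Illustrative example (editor's note): for a giv computing i * 4 + 16
   whose biv starts at 2, callers pass B == 2, M == 4 and A == 16, and the
   returned sequence leaves 2 * 4 + 16 == 24 in REG; expand_mult_add may use
   shifts and adds rather than an actual multiply insn.  */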
9151
9152 static rtx
9153 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9154 {
9155 rtx seq;
9156 rtx result;
9157
9158 start_sequence ();
9159 /* Use unsigned arithmetic. */
9160 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9161 if (reg != result)
9162 emit_move_insn (reg, result);
9163 seq = get_insns ();
9164 end_sequence ();
9165
9166 return seq;
9167 }
9168
9169
9170 /* Update registers created in insn sequence SEQ. */
9171
9172 static void
9173 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9174 {
9175 rtx insn;
9176
9177 /* Update register info for alias analysis. */
9178
9179 insn = seq;
9180 while (insn != NULL_RTX)
9181 {
9182 rtx set = single_set (insn);
9183
9184 if (set && REG_P (SET_DEST (set)))
9185 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9186
9187 insn = NEXT_INSN (insn);
9188 }
9189 }
9190
9191
9192 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  B
9193 is the initial value of the basic induction variable, M a
9194 multiplicative constant, A an additive constant and REG the
9195 destination register. */
9196
9197 static void
9198 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9199 rtx reg, basic_block before_bb, rtx before_insn)
9200 {
9201 rtx seq;
9202
9203 if (! before_insn)
9204 {
9205 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9206 return;
9207 }
9208
9209 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9210 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9211
9212 /* Increase the lifetime of any invariants moved further in code. */
9213 update_reg_last_use (a, before_insn);
9214 update_reg_last_use (b, before_insn);
9215 update_reg_last_use (m, before_insn);
9216
9217 /* It is possible that the expansion created lots of new registers.
9218 Iterate over the sequence we just created and record them all. We
9219 must do this before inserting the sequence. */
9220 loop_regs_update (loop, seq);
9221
9222 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9223 }
9224
9225
9226 /* Emit insns after the loop (at the loop sink) to set REG = B * M + A.
9227 B is the initial value of the basic induction variable, M a
9228 multiplicative constant, A an additive constant and REG the
9229 destination register.  */
9230
9231 static void
9232 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9233 {
9234 rtx seq;
9235
9236 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9237 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9238
9239 /* Increase the lifetime of any invariants moved further in code.
9240 ???? Is this really necessary? */
9241 update_reg_last_use (a, loop->sink);
9242 update_reg_last_use (b, loop->sink);
9243 update_reg_last_use (m, loop->sink);
9244
9245 /* It is possible that the expansion created lots of new registers.
9246 Iterate over the sequence we just created and record them all. We
9247 must do this before inserting the sequence. */
9248 loop_regs_update (loop, seq);
9249
9250 loop_insn_sink (loop, seq);
9251 }
9252
9253
9254 /* Emit insns in the loop pre-header to set REG = B * M + A.  B is the
9255 initial value of the basic induction variable, M a multiplicative
9256 constant, A an additive constant and REG the destination register.  */
9257
9258 static void
9259 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9260 {
9261 rtx seq;
9262
9263 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9264 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9265
9266 /* It is possible that the expansion created lots of new registers.
9267 Iterate over the sequence we just created and record them all. We
9268 must do this before inserting the sequence. */
9269 loop_regs_update (loop, seq);
9270
9271 loop_insn_hoist (loop, seq);
9272 }
9273
9274
9275
9276 /* Similar to gen_add_mult, but compute cost rather than generating
9277 sequence. */
9278
9279 static int
9280 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9281 {
9282 int cost = 0;
9283 rtx last, result;
9284
9285 start_sequence ();
9286 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9287 if (reg != result)
9288 emit_move_insn (reg, result);
9289 last = get_last_insn ();
9290 while (last)
9291 {
9292 rtx t = single_set (last);
9293 if (t)
9294 cost += rtx_cost (SET_SRC (t), SET);
9295 last = PREV_INSN (last);
9296 }
9297 end_sequence ();
9298 return cost;
9299 }
9300 \f
9301 /* Test whether A * B can be computed without
9302 an actual multiply insn. Value is 1 if so.
9303
9304 ??? This function stinks because it generates a ton of wasted RTL
9305 ??? and as a result fragments GC memory to no end. There are other
9306 ??? places in the compiler which are invoked a lot and do the same
9307 ??? thing, generate wasted RTL just to see if something is possible. */
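/* Illustrative example (editor's note): a multiplication by a small power of
   two, say 8, normally expands to a single shift and makes this function
   return 1; a multiplication by a constant that needs a real mult insn or a
   libcall makes it return 0.  The exact outcome depends on what expand_mult
   emits for the target.  */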
9308
9309 static int
9310 product_cheap_p (rtx a, rtx b)
9311 {
9312 rtx tmp;
9313 int win, n_insns;
9314
9315 /* If only one is constant, make it B. */
9316 if (GET_CODE (a) == CONST_INT)
9317 tmp = a, a = b, b = tmp;
9318
9319 /* If the first is still constant, both were, so no multiply is needed.  */
9320 if (GET_CODE (a) == CONST_INT)
9321 return 1;
9322
9323 /* If the second is not constant, neither is, so a multiply would be needed.  */
9324 if (GET_CODE (b) != CONST_INT)
9325 return 0;
9326
9327 /* One operand is constant, so we might not need a multiply insn.  Generate
9328 the code for the multiply and see whether a call, a multiply insn, or a
9329 long sequence of insns is generated.  */
9330
9331 start_sequence ();
9332 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9333 tmp = get_insns ();
9334 end_sequence ();
9335
9336 win = 1;
9337 if (tmp == NULL_RTX)
9338 ;
9339 else if (INSN_P (tmp))
9340 {
9341 n_insns = 0;
9342 while (tmp != NULL_RTX)
9343 {
9344 rtx next = NEXT_INSN (tmp);
9345
9346 if (++n_insns > 3
9347 || !NONJUMP_INSN_P (tmp)
9348 || (GET_CODE (PATTERN (tmp)) == SET
9349 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9350 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9351 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9352 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9353 {
9354 win = 0;
9355 break;
9356 }
9357
9358 tmp = next;
9359 }
9360 }
9361 else if (GET_CODE (tmp) == SET
9362 && GET_CODE (SET_SRC (tmp)) == MULT)
9363 win = 0;
9364 else if (GET_CODE (tmp) == PARALLEL
9365 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9366 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9367 win = 0;
9368
9369 return win;
9370 }
9371 \f
9372 /* Check to see if the loop can be terminated by a "decrement and branch
9373 until zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
9374 Also try reversing an increment loop to a decrement loop
9375 to see if the optimization can be performed.
9376 Value is nonzero if optimization was performed. */
9377
9378 /* This is useful even if the architecture doesn't have such an insn,
9379 because it might change a loop which increments from 0 to n to a loop
9380 which decrements from n to 0. A loop that decrements to zero is usually
9381 faster than one that increments from zero. */
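/* Illustrative example (editor's sketch, not from the original sources):
   a counting loop such as

	for (i = 0; i < n; i++)
	  body;			(i is not used inside the body)

   can be rewritten as

	for (i = n; --i >= 0; )
	  body;

   so that the exit test compares the biv against zero and, on targets with
   a decrement-and-branch instruction, collapses into a single insn.  */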
9382
9383 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9384 such as approx_final_value, biv_total_increment, loop_iterations, and
9385 final_[bg]iv_value. */
9386
9387 static int
9388 check_dbra_loop (struct loop *loop, int insn_count)
9389 {
9390 struct loop_info *loop_info = LOOP_INFO (loop);
9391 struct loop_regs *regs = LOOP_REGS (loop);
9392 struct loop_ivs *ivs = LOOP_IVS (loop);
9393 struct iv_class *bl;
9394 rtx reg;
9395 enum machine_mode mode;
9396 rtx jump_label;
9397 rtx final_value;
9398 rtx start_value;
9399 rtx new_add_val;
9400 rtx comparison;
9401 rtx before_comparison;
9402 rtx p;
9403 rtx jump;
9404 rtx first_compare;
9405 int compare_and_branch;
9406 rtx loop_start = loop->start;
9407 rtx loop_end = loop->end;
9408
9409 /* If last insn is a conditional branch, and the insn before tests a
9410 register value, try to optimize it. Otherwise, we can't do anything. */
9411
9412 jump = PREV_INSN (loop_end);
9413 comparison = get_condition_for_loop (loop, jump);
9414 if (comparison == 0)
9415 return 0;
9416 if (!onlyjump_p (jump))
9417 return 0;
9418
9419 /* Try to compute whether the compare/branch at the loop end is one or
9420 two instructions. */
9421 get_condition (jump, &first_compare, false, true);
9422 if (first_compare == jump)
9423 compare_and_branch = 1;
9424 else if (first_compare == prev_nonnote_insn (jump))
9425 compare_and_branch = 2;
9426 else
9427 return 0;
9428
9429 {
9430 /* If more than one condition is present to control the loop, then
9431 do not proceed, as this function does not know how to rewrite
9432 loop tests with more than one condition.
9433
9434 Look backwards from the first insn in the last comparison
9435 sequence and see if we've got another comparison sequence. */
9436
9437 rtx jump1;
9438 if ((jump1 = prev_nonnote_insn (first_compare))
9439 && JUMP_P (jump1))
9440 return 0;
9441 }
9442
9443 /* Check all of the bivs to see if the compare uses one of them.
9444 Skip biv's set more than once because we can't guarantee that
9445 it will be zero on the last iteration. Also skip if the biv is
9446 used between its update and the test insn. */
9447
9448 for (bl = ivs->list; bl; bl = bl->next)
9449 {
9450 if (bl->biv_count == 1
9451 && ! bl->biv->maybe_multiple
9452 && bl->biv->dest_reg == XEXP (comparison, 0)
9453 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9454 first_compare))
9455 break;
9456 }
9457
9458 /* Try swapping the comparison to identify a suitable biv. */
9459 if (!bl)
9460 for (bl = ivs->list; bl; bl = bl->next)
9461 if (bl->biv_count == 1
9462 && ! bl->biv->maybe_multiple
9463 && bl->biv->dest_reg == XEXP (comparison, 1)
9464 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9465 first_compare))
9466 {
9467 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9468 VOIDmode,
9469 XEXP (comparison, 1),
9470 XEXP (comparison, 0));
9471 break;
9472 }
9473
9474 if (! bl)
9475 return 0;
9476
9477 /* Look for the case where the basic induction variable is always
9478 nonnegative, and equals zero on the last iteration.
9479 In this case, add a reg_note REG_NONNEG, which allows the
9480 m68k DBRA instruction to be used. */
9481
9482 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9483 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9484 && GET_CODE (bl->biv->add_val) == CONST_INT
9485 && INTVAL (bl->biv->add_val) < 0)
9486 {
9487 /* The initial value must be greater than 0, and
9488 init_val % -dec_value must be 0, to ensure that the biv equals
9489 zero on the last iteration.  */
9490
9491 if (GET_CODE (bl->initial_value) == CONST_INT
9492 && INTVAL (bl->initial_value) > 0
9493 && (INTVAL (bl->initial_value)
9494 % (-INTVAL (bl->biv->add_val))) == 0)
9495 {
9496 /* Register always nonnegative, add REG_NOTE to branch. */
9497 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9498 REG_NOTES (jump)
9499 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9500 REG_NOTES (jump));
9501 bl->nonneg = 1;
9502
9503 return 1;
9504 }
9505
9506 /* If the decrement is 1 and the value was tested as >= 0 before
9507 the loop, then we can safely optimize. */
9508 for (p = loop_start; p; p = PREV_INSN (p))
9509 {
9510 if (LABEL_P (p))
9511 break;
9512 if (!JUMP_P (p))
9513 continue;
9514
9515 before_comparison = get_condition_for_loop (loop, p);
9516 if (before_comparison
9517 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9518 && (GET_CODE (before_comparison) == LT
9519 || GET_CODE (before_comparison) == LTU)
9520 && XEXP (before_comparison, 1) == const0_rtx
9521 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9522 && INTVAL (bl->biv->add_val) == -1)
9523 {
9524 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9525 REG_NOTES (jump)
9526 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9527 REG_NOTES (jump));
9528 bl->nonneg = 1;
9529
9530 return 1;
9531 }
9532 }
9533 }
9534 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9535 && INTVAL (bl->biv->add_val) > 0)
9536 {
9537 /* Try to change the increment to a decrement, so we can apply the
9538 optimization above.  We can do this if:
9539 all registers modified are induction variables or invariant,
9540 all memory references have non-overlapping addresses
9541 (obviously true if there is only one write), and
9542 we allow 2 insns for the compare/jump at the end of the loop.  */
9543 /* Also, we must avoid any instructions which use both the reversed
9544 biv and another biv. Such instructions will fail if the loop is
9545 reversed. We meet this condition by requiring that either
9546 no_use_except_counting is true, or else that there is only
9547 one biv. */
9548 int num_nonfixed_reads = 0;
9549 /* 1 if the iteration var is used only to count iterations. */
9550 int no_use_except_counting = 0;
9551 /* 1 if the loop has no memory store, or it has a single memory store
9552 which is reversible. */
9553 int reversible_mem_store = 1;
9554
9555 if (bl->giv_count == 0
9556 && !loop->exit_count
9557 && !loop_info->has_multiple_exit_targets)
9558 {
9559 rtx bivreg = regno_reg_rtx[bl->regno];
9560 struct iv_class *blt;
9561
9562 /* If there are no givs for this biv, and the only exit is the
9563 fall through at the end of the loop, then
9564 see if perhaps there are no uses except to count. */
9565 no_use_except_counting = 1;
9566 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9567 if (INSN_P (p))
9568 {
9569 rtx set = single_set (p);
9570
9571 if (set && REG_P (SET_DEST (set))
9572 && REGNO (SET_DEST (set)) == bl->regno)
9573 /* An insn that sets the biv is okay. */
9574 ;
9575 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9576 /* An insn that doesn't mention the biv is okay. */
9577 ;
9578 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9579 || p == prev_nonnote_insn (loop_end))
9580 {
9581 /* If either of these insns uses the biv and sets a pseudo
9582 that has more than one usage, then the biv has uses
9583 other than counting since it's used to derive a value
9584 that is used more than one time. */
9585 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9586 regs);
9587 if (regs->multiple_uses)
9588 {
9589 no_use_except_counting = 0;
9590 break;
9591 }
9592 }
9593 else
9594 {
9595 no_use_except_counting = 0;
9596 break;
9597 }
9598 }
9599
9600 /* A biv has uses besides counting if it is used to set
9601 another biv. */
9602 for (blt = ivs->list; blt; blt = blt->next)
9603 if (blt->init_set
9604 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9605 {
9606 no_use_except_counting = 0;
9607 break;
9608 }
9609 }
9610
9611 if (no_use_except_counting)
9612 /* No need to worry about MEMs. */
9613 ;
9614 else if (loop_info->num_mem_sets <= 1)
9615 {
9616 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9617 if (INSN_P (p))
9618 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9619
9620 /* If the loop has a single store, and the destination address is
9621 invariant, then we can't reverse the loop, because this address
9622 might then have the wrong value at loop exit.
9623 This would work if the source were invariant as well; however, in that
9624 case the insn should have been moved out of the loop.  */
9625
9626 if (loop_info->num_mem_sets == 1)
9627 {
9628 struct induction *v;
9629
9630 /* If we could prove that each of the memory locations
9631 written to was different, then we could reverse the
9632 store -- but we don't presently have any way of
9633 knowing that. */
9634 reversible_mem_store = 0;
9635
9636 /* If the store depends on a register that is set after the
9637 store, it depends on the initial value, and is thus not
9638 reversible. */
9639 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9640 {
9641 if (v->giv_type == DEST_REG
9642 && reg_mentioned_p (v->dest_reg,
9643 PATTERN (loop_info->first_loop_store_insn))
9644 && loop_insn_first_p (loop_info->first_loop_store_insn,
9645 v->insn))
9646 reversible_mem_store = 0;
9647 }
9648 }
9649 }
9650 else
9651 return 0;
9652
9653 /* This code only acts for innermost loops. Also it simplifies
9654 the memory address check by only reversing loops with
9655 zero or one memory access.
9656 Two memory accesses could involve parts of the same array,
9657 and that can't be reversed.
9658 If the biv is used only for counting, then we don't need to worry
9659 about all these things. */
9660
9661 if ((num_nonfixed_reads <= 1
9662 && ! loop_info->has_nonconst_call
9663 && ! loop_info->has_prefetch
9664 && ! loop_info->has_volatile
9665 && reversible_mem_store
9666 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9667 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9668 && (bl == ivs->list && bl->next == 0))
9669 || (no_use_except_counting && ! loop_info->has_prefetch))
9670 {
9671 rtx tem;
9672
9673 /* Loop can be reversed. */
9674 if (loop_dump_stream)
9675 fprintf (loop_dump_stream, "Can reverse loop\n");
9676
9677 /* Now check other conditions:
9678
9679 The increment must be a constant, as must the initial value,
9680 and the comparison code must be LT.
9681
9682 This test can probably be improved since +/- 1 in the constant
9683 can be obtained by changing LT to LE and vice versa; this is
9684 confusing. */
9685
9686 if (comparison
9687 /* for constants, LE gets turned into LT */
9688 && (GET_CODE (comparison) == LT
9689 || (GET_CODE (comparison) == LE
9690 && no_use_except_counting)
9691 || GET_CODE (comparison) == LTU))
9692 {
9693 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9694 rtx initial_value, comparison_value;
9695 int nonneg = 0;
9696 enum rtx_code cmp_code;
9697 int comparison_const_width;
9698 unsigned HOST_WIDE_INT comparison_sign_mask;
9699 bool keep_first_compare;
9700
9701 add_val = INTVAL (bl->biv->add_val);
9702 comparison_value = XEXP (comparison, 1);
9703 if (GET_MODE (comparison_value) == VOIDmode)
9704 comparison_const_width
9705 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9706 else
9707 comparison_const_width
9708 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9709 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9710 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9711 comparison_sign_mask
9712 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9713
9714 /* If the comparison value is not a loop invariant, then we
9715 can not reverse this loop.
9716
9717 ??? If the insns which initialize the comparison value as
9718 a whole compute an invariant result, then we could move
9719 them out of the loop and proceed with loop reversal. */
9720 if (! loop_invariant_p (loop, comparison_value))
9721 return 0;
9722
9723 if (GET_CODE (comparison_value) == CONST_INT)
9724 comparison_val = INTVAL (comparison_value);
9725 initial_value = bl->initial_value;
9726
9727 /* Normalize the initial value if it is an integer and
9728 has no other use except as a counter. This will allow
9729 a few more loops to be reversed. */
9730 if (no_use_except_counting
9731 && GET_CODE (comparison_value) == CONST_INT
9732 && GET_CODE (initial_value) == CONST_INT)
9733 {
9734 comparison_val = comparison_val - INTVAL (bl->initial_value);
9735 /* The code below requires comparison_val to be a multiple
9736 of add_val in order to do the loop reversal, so
9737 round up comparison_val to a multiple of add_val.
9738 Since comparison_value is constant, we know that the
9739 current comparison code is LT. */
9740 comparison_val = comparison_val + add_val - 1;
9741 comparison_val
9742 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9743 /* We postpone overflow checks for COMPARISON_VAL here;
9744 even if there is an overflow, we might still be able to
9745 reverse the loop, if converting the loop exit test to
9746 NE is possible. */
9747 initial_value = const0_rtx;
9748 }
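	  /* Worked example (editor's illustration): with an initial value of 3,
	     a comparison value of 10 and add_val == 2, comparison_val becomes
	     10 - 3 == 7 and is rounded up to 8, while initial_value becomes 0;
	     both forms run 4 iterations (3,5,7,9 versus 0,2,4,6), and
	     final_value below is reset to 8 + 3 == 11.  */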
9749
9750 /* First check if we can do a vanilla loop reversal. */
9751 if (initial_value == const0_rtx
9752 && GET_CODE (comparison_value) == CONST_INT
9753 /* Now do postponed overflow checks on COMPARISON_VAL. */
9754 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9755 & comparison_sign_mask))
9756 {
9757 /* Register will always be nonnegative, with value
9758 0 on last iteration */
9759 add_adjust = add_val;
9760 nonneg = 1;
9761 cmp_code = GE;
9762 }
9763 else
9764 return 0;
9765
9766 if (GET_CODE (comparison) == LE)
9767 add_adjust -= add_val;
9768
9769 /* If the initial value is not zero, or if the comparison
9770 value is not an exact multiple of the increment, then we
9771 can not reverse this loop. */
9772 if (initial_value == const0_rtx
9773 && GET_CODE (comparison_value) == CONST_INT)
9774 {
9775 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9776 return 0;
9777 }
9778 else
9779 {
9780 if (! no_use_except_counting || add_val != 1)
9781 return 0;
9782 }
9783
9784 final_value = comparison_value;
9785
9786 /* Reset these in case we normalized the initial value
9787 and comparison value above. */
9788 if (GET_CODE (comparison_value) == CONST_INT
9789 && GET_CODE (initial_value) == CONST_INT)
9790 {
9791 comparison_value = GEN_INT (comparison_val);
9792 final_value
9793 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9794 }
9795 bl->initial_value = initial_value;
9796
9797 /* Save some info needed to produce the new insns. */
9798 reg = bl->biv->dest_reg;
9799 mode = GET_MODE (reg);
9800 jump_label = condjump_label (PREV_INSN (loop_end));
9801 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9802
9803 /* Set start_value; if this is not a CONST_INT, we need
9804 to generate a SUB.
9805 Initialize biv to start_value before loop start.
9806 The old initializing insn will be deleted as a
9807 dead store by flow.c. */
9808 if (initial_value == const0_rtx
9809 && GET_CODE (comparison_value) == CONST_INT)
9810 {
9811 start_value
9812 = gen_int_mode (comparison_val - add_adjust, mode);
9813 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9814 }
9815 else if (GET_CODE (initial_value) == CONST_INT)
9816 {
9817 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9818 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9819
9820 if (add_insn == 0)
9821 return 0;
9822
9823 start_value
9824 = gen_rtx_PLUS (mode, comparison_value, offset);
9825 loop_insn_hoist (loop, add_insn);
9826 if (GET_CODE (comparison) == LE)
9827 final_value = gen_rtx_PLUS (mode, comparison_value,
9828 GEN_INT (add_val));
9829 }
9830 else if (! add_adjust)
9831 {
9832 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9833 initial_value);
9834
9835 if (sub_insn == 0)
9836 return 0;
9837 start_value
9838 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9839 loop_insn_hoist (loop, sub_insn);
9840 }
9841 else
9842 /* We could handle the other cases too, but it'll be
9843 better to have a testcase first. */
9844 return 0;
9845
9846 /* We may not have a single insn which can increment a reg, so
9847 create a sequence to hold all the insns from expand_inc. */
9848 start_sequence ();
9849 expand_inc (reg, new_add_val);
9850 tem = get_insns ();
9851 end_sequence ();
9852
9853 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9854 delete_insn (bl->biv->insn);
9855
9856 /* Update biv info to reflect its new status. */
9857 bl->biv->insn = p;
9858 bl->initial_value = start_value;
9859 bl->biv->add_val = new_add_val;
9860
9861 /* Update loop info. */
9862 loop_info->initial_value = reg;
9863 loop_info->initial_equiv_value = reg;
9864 loop_info->final_value = const0_rtx;
9865 loop_info->final_equiv_value = const0_rtx;
9866 loop_info->comparison_value = const0_rtx;
9867 loop_info->comparison_code = cmp_code;
9868 loop_info->increment = new_add_val;
9869
9870 /* Inc LABEL_NUSES so that delete_insn will
9871 not delete the label. */
9872 LABEL_NUSES (XEXP (jump_label, 0))++;
9873
9874 /* If we have a separate comparison insn that does more
9875 than just set cc0, the result of the comparison might
9876 be used outside the loop. */
9877 keep_first_compare = (compare_and_branch == 2
9878 #ifdef HAVE_CC0
9879 && sets_cc0_p (first_compare) <= 0
9880 #endif
9881 );
9882
9883 /* Emit an insn after the end of the loop to set the biv's
9884 proper exit value if it is used anywhere outside the loop. */
9885 if (keep_first_compare
9886 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9887 || ! bl->init_insn
9888 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9889 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9890
9891 if (keep_first_compare)
9892 loop_insn_sink (loop, PATTERN (first_compare));
9893
9894 /* Delete compare/branch at end of loop. */
9895 delete_related_insns (PREV_INSN (loop_end));
9896 if (compare_and_branch == 2)
9897 delete_related_insns (first_compare);
9898
9899 /* Add new compare/branch insn at end of loop. */
9900 start_sequence ();
9901 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9902 mode, 0,
9903 XEXP (jump_label, 0));
9904 tem = get_insns ();
9905 end_sequence ();
9906 emit_jump_insn_before (tem, loop_end);
9907
9908 for (tem = PREV_INSN (loop_end);
9909 tem && !JUMP_P (tem);
9910 tem = PREV_INSN (tem))
9911 ;
9912
9913 if (tem)
9914 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9915
9916 if (nonneg)
9917 {
9918 if (tem)
9919 {
9920 /* Increment of LABEL_NUSES done above. */
9921 /* Register is now always nonnegative,
9922 so add REG_NONNEG note to the branch. */
9923 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9924 REG_NOTES (tem));
9925 }
9926 bl->nonneg = 1;
9927 }
9928
9929 /* No insn may reference both the reversed and another biv or it
9930 will fail (see comment near the top of the loop reversal
9931 code).
9932 Earlier on, we have verified that the biv has no use except
9933 counting, or it is the only biv in this function.
9934 However, the code that computes no_use_except_counting does
9935 not verify reg notes. It's possible to have an insn that
9936 references another biv, and has a REG_EQUAL note with an
9937 expression based on the reversed biv. To avoid this case,
9938 remove all REG_EQUAL notes based on the reversed biv
9939 here. */
9940 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9941 if (INSN_P (p))
9942 {
9943 rtx *pnote;
9944 rtx set = single_set (p);
9945 /* If this is a set of a GIV based on the reversed biv, any
9946 REG_EQUAL notes should still be correct. */
9947 if (! set
9948 || !REG_P (SET_DEST (set))
9949 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9950 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9951 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9952 for (pnote = &REG_NOTES (p); *pnote;)
9953 {
9954 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9955 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9956 XEXP (*pnote, 0)))
9957 *pnote = XEXP (*pnote, 1);
9958 else
9959 pnote = &XEXP (*pnote, 1);
9960 }
9961 }
9962
9963 /* Mark that this biv has been reversed. Each giv which depends
9964 on this biv, and which is also live past the end of the loop
9965 will have to be fixed up. */
9966
9967 bl->reversed = 1;
9968
9969 if (loop_dump_stream)
9970 {
9971 fprintf (loop_dump_stream, "Reversed loop");
9972 if (bl->nonneg)
9973 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9974 else
9975 fprintf (loop_dump_stream, "\n");
9976 }
9977
9978 return 1;
9979 }
9980 }
9981 }
9982
9983 return 0;
9984 }
9985 \f
9986 /* Verify whether the biv BL appears to be eliminable,
9987 based on the insns in the loop that refer to it.
9988
9989 If ELIMINATE_P is nonzero, actually do the elimination.
9990
9991 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
9992 determine whether invariant insns should be placed inside or at the
9993 start of the loop. */
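/* Illustrative example (editor's sketch, not from the original sources):
   if the only remaining use of biv `i' is a comparison such as i < 100, and
   a reduced giv computes p == &a[0] + 4*i, the comparison can be rewritten
   as p < &a[0] + 400 (the constant run through the same linear function),
   after which the biv itself becomes dead and can be eliminated.  */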
9994
9995 static int
9996 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
9997 int eliminate_p, int threshold, int insn_count)
9998 {
9999 struct loop_ivs *ivs = LOOP_IVS (loop);
10000 rtx reg = bl->biv->dest_reg;
10001 rtx p;
10002
10003 /* Scan all insns in the loop, stopping if we find one that uses the
10004 biv in a way that we cannot eliminate. */
10005
10006 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10007 {
10008 enum rtx_code code = GET_CODE (p);
10009 basic_block where_bb = 0;
10010 rtx where_insn = threshold >= insn_count ? 0 : p;
10011 rtx note;
10012
10013 /* If this is a libcall that sets a giv, skip ahead to its end. */
10014 if (INSN_P (p))
10015 {
10016 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
10017
10018 if (note)
10019 {
10020 rtx last = XEXP (note, 0);
10021 rtx set = single_set (last);
10022
10023 if (set && REG_P (SET_DEST (set)))
10024 {
10025 unsigned int regno = REGNO (SET_DEST (set));
10026
10027 if (regno < ivs->n_regs
10028 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
10029 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
10030 p = last;
10031 }
10032 }
10033 }
10034
10035 /* Closely examine the insn if the biv is mentioned. */
10036 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
10037 && reg_mentioned_p (reg, PATTERN (p))
10038 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
10039 eliminate_p, where_bb, where_insn))
10040 {
10041 if (loop_dump_stream)
10042 fprintf (loop_dump_stream,
10043 "Cannot eliminate biv %d: biv used in insn %d.\n",
10044 bl->regno, INSN_UID (p));
10045 break;
10046 }
10047
10048 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
10049 if (eliminate_p
10050 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
10051 && reg_mentioned_p (reg, XEXP (note, 0)))
10052 remove_note (p, note);
10053 }
10054
10055 if (p == loop->end)
10056 {
10057 if (loop_dump_stream)
10058 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
10059 bl->regno, eliminate_p ? "was" : "can be");
10060 return 1;
10061 }
10062
10063 return 0;
10064 }
10065 \f
10066 /* INSN and REFERENCE are instructions in the same insn chain.
10067 Return nonzero if INSN is first. */
10068
10069 static int
10070 loop_insn_first_p (rtx insn, rtx reference)
10071 {
10072 rtx p, q;
10073
10074 for (p = insn, q = reference;;)
10075 {
10076 /* Start with test for not first so that INSN == REFERENCE yields not
10077 first. */
10078 if (q == insn || ! p)
10079 return 0;
10080 if (p == reference || ! q)
10081 return 1;
10082
10083 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
10084 previous insn, hence the <= comparison below does not work if
10085 P is a note. */
10086 if (INSN_UID (p) < max_uid_for_loop
10087 && INSN_UID (q) < max_uid_for_loop
10088 && !NOTE_P (p))
10089 return INSN_LUID (p) <= INSN_LUID (q);
10090
10091 if (INSN_UID (p) >= max_uid_for_loop
10092 || NOTE_P (p))
10093 p = NEXT_INSN (p);
10094 if (INSN_UID (q) >= max_uid_for_loop)
10095 q = NEXT_INSN (q);
10096 }
10097 }
10098
10099 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
10100 the offset that we have to take into account due to auto-increment /
10101 div derivation is zero. */
10102 static int
10103 biv_elimination_giv_has_0_offset (struct induction *biv,
10104 struct induction *giv, rtx insn)
10105 {
10106 /* If the giv V had the auto-inc address optimization applied
10107 to it, and INSN occurs between the giv insn and the biv
10108 insn, then we'd have to adjust the value used here.
10109 This is rare, so we don't bother to make this possible. */
10110 if (giv->auto_inc_opt
10111 && ((loop_insn_first_p (giv->insn, insn)
10112 && loop_insn_first_p (insn, biv->insn))
10113 || (loop_insn_first_p (biv->insn, insn)
10114 && loop_insn_first_p (insn, giv->insn))))
10115 return 0;
10116
10117 return 1;
10118 }
10119
10120 /* If BL appears in X (part of the pattern of INSN), see if we can
10121 eliminate its use. If so, return 1. If not, return 0.
10122
10123 If BIV does not appear in X, return 1.
10124
10125 If ELIMINATE_P is nonzero, actually do the elimination.
10126 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10127 Depending on how many items have been moved out of the loop, it
10128 will either be before INSN (when WHERE_INSN is nonzero) or at the
10129 start of the loop (when WHERE_INSN is zero). */
10130
10131 static int
10132 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10133 struct iv_class *bl, int eliminate_p,
10134 basic_block where_bb, rtx where_insn)
10135 {
10136 enum rtx_code code = GET_CODE (x);
10137 rtx reg = bl->biv->dest_reg;
10138 enum machine_mode mode = GET_MODE (reg);
10139 struct induction *v;
10140 rtx arg, tem;
10141 #ifdef HAVE_cc0
10142 rtx new;
10143 #endif
10144 int arg_operand;
10145 const char *fmt;
10146 int i, j;
10147
10148 switch (code)
10149 {
10150 case REG:
10151 /* If we haven't already been able to do something with this BIV,
10152 we can't eliminate it. */
10153 if (x == reg)
10154 return 0;
10155 return 1;
10156
10157 case SET:
10158 /* If this sets the BIV, it is not a problem. */
10159 if (SET_DEST (x) == reg)
10160 return 1;
10161
10162 /* If this is an insn that defines a giv, it is also ok because
10163 it will go away when the giv is reduced. */
10164 for (v = bl->giv; v; v = v->next_iv)
10165 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10166 return 1;
10167
10168 #ifdef HAVE_cc0
10169 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10170 {
10171 /* Can replace with any giv that was reduced and
10172 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10173 Require a constant for MULT_VAL, so we know it's nonzero.
10174 ??? We disable this optimization to avoid potential
10175 overflows. */
10176
10177 for (v = bl->giv; v; v = v->next_iv)
10178 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10179 && v->add_val == const0_rtx
10180 && ! v->ignore && ! v->maybe_dead && v->always_computable
10181 && v->mode == mode
10182 && 0)
10183 {
10184 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10185 continue;
10186
10187 if (! eliminate_p)
10188 return 1;
10189
10190 /* If the giv has the opposite direction of change,
10191 then reverse the comparison. */
10192 if (INTVAL (v->mult_val) < 0)
10193 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10194 const0_rtx, v->new_reg);
10195 else
10196 new = v->new_reg;
10197
10198 /* We can probably test that giv's reduced reg. */
10199 if (validate_change (insn, &SET_SRC (x), new, 0))
10200 return 1;
10201 }
10202
10203 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10204 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10205 Require a constant for MULT_VAL, so we know it's nonzero.
10206 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10207 overflow problem. */
10208
10209 for (v = bl->giv; v; v = v->next_iv)
10210 if (GET_CODE (v->mult_val) == CONST_INT
10211 && v->mult_val != const0_rtx
10212 && ! v->ignore && ! v->maybe_dead && v->always_computable
10213 && v->mode == mode
10214 && (GET_CODE (v->add_val) == SYMBOL_REF
10215 || GET_CODE (v->add_val) == LABEL_REF
10216 || GET_CODE (v->add_val) == CONST
10217 || (REG_P (v->add_val)
10218 && REG_POINTER (v->add_val))))
10219 {
10220 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10221 continue;
10222
10223 if (! eliminate_p)
10224 return 1;
10225
10226 /* If the giv has the opposite direction of change,
10227 then reverse the comparison. */
10228 if (INTVAL (v->mult_val) < 0)
10229 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10230 v->new_reg);
10231 else
10232 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10233 copy_rtx (v->add_val));
10234
10235 /* Replace biv with the giv's reduced register. */
10236 update_reg_last_use (v->add_val, insn);
10237 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10238 return 1;
10239
10240 /* Insn doesn't support that constant or invariant. Copy it
10241 into a register (it will be a loop invariant.) */
10242 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10243
10244 loop_insn_emit_before (loop, 0, where_insn,
10245 gen_move_insn (tem,
10246 copy_rtx (v->add_val)));
10247
10248 /* Substitute the new register for its invariant value in
10249 the compare expression. */
10250 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10251 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10252 return 1;
10253 }
10254 }
10255 #endif
10256 break;
10257
10258 case COMPARE:
10259 case EQ: case NE:
10260 case GT: case GE: case GTU: case GEU:
10261 case LT: case LE: case LTU: case LEU:
10262 /* See if either argument is the biv. */
10263 if (XEXP (x, 0) == reg)
10264 arg = XEXP (x, 1), arg_operand = 1;
10265 else if (XEXP (x, 1) == reg)
10266 arg = XEXP (x, 0), arg_operand = 0;
10267 else
10268 break;
10269
10270 if (GET_CODE (arg) != CONST_INT)
10271 return 0;
10272
10273 /* Unless we're dealing with an equality comparison, if we can't
10274 determine that the original biv doesn't wrap, then we must not
10275 apply the transformation. */
10276 /* ??? Actually, what we must do is verify that the transformed
10277 giv doesn't wrap. But the general case of this transformation
10278 was disabled long ago due to wrapping problems, and there's no
10279 point reviving it this close to end-of-life for loop.c. The
10280 only case still enabled is known (via the check on add_val) to
10281 be pointer arithmetic, which in theory never overflows for
10282 valid programs. */
10283 /* Without lifetime analysis, we don't know how COMPARE will be
10284 used, so we must assume the worst. */
10285 if (code != EQ && code != NE
10286 && biased_biv_may_wrap_p (loop, bl, INTVAL (arg)))
10287 return 0;
10288
10289 /* Try to replace with any giv that has constant positive mult_val
10290 and a pointer add_val. */
10291 for (v = bl->giv; v; v = v->next_iv)
10292 if (GET_CODE (v->mult_val) == CONST_INT
10293 && INTVAL (v->mult_val) > 0
10294 && (GET_CODE (v->add_val) == SYMBOL_REF
10295 || GET_CODE (v->add_val) == LABEL_REF
10296 || GET_CODE (v->add_val) == CONST
10297 || (REG_P (v->add_val) && REG_POINTER (v->add_val)))
10298 && ! v->ignore && ! v->maybe_dead && v->always_computable
10299 && v->mode == mode)
10300 {
10301 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10302 continue;
10303
10304 if (! eliminate_p)
10305 return 1;
10306
10307 /* Replace biv with the giv's reduced reg. */
10308 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10309
10310 /* Load the value into a register. */
10311 tem = gen_reg_rtx (mode);
10312 loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val,
10313 tem, where_bb, where_insn);
10314
10315 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10316
10317 if (apply_change_group ())
10318 return 1;
10319 }
10320
10321 /* If we get here, the biv can't be eliminated. */
10322 return 0;
10323
10324 case MEM:
10325 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10326 biv is used in it, since it will be replaced. */
10327 for (v = bl->giv; v; v = v->next_iv)
10328 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10329 return 1;
10330 break;
10331
10332 default:
10333 break;
10334 }
10335
10336 /* See if any subexpression fails elimination. */
10337 fmt = GET_RTX_FORMAT (code);
10338 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10339 {
10340 switch (fmt[i])
10341 {
10342 case 'e':
10343 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10344 eliminate_p, where_bb, where_insn))
10345 return 0;
10346 break;
10347
10348 case 'E':
10349 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10350 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10351 eliminate_p, where_bb, where_insn))
10352 return 0;
10353 break;
10354 }
10355 }
10356
10357 return 1;
10358 }
10359 \f
10360 /* Return nonzero if the last use of REG
10361 is in an insn following INSN in the same basic block. */
10362
10363 static int
10364 last_use_this_basic_block (rtx reg, rtx insn)
10365 {
10366 rtx n;
10367 for (n = insn;
10368 n && !LABEL_P (n) && !JUMP_P (n);
10369 n = NEXT_INSN (n))
10370 {
10371 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10372 return 1;
10373 }
10374 return 0;
10375 }
10376 \f
10377 /* Called via `note_stores' to record the initial value of a biv. Here we
10378 just record the location of the set and process it later. */
10379
10380 static void
10381 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10382 {
10383 struct loop_ivs *ivs = (struct loop_ivs *) data;
10384 struct iv_class *bl;
10385
10386 if (!REG_P (dest)
10387 || REGNO (dest) >= ivs->n_regs
10388 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10389 return;
10390
10391 bl = REG_IV_CLASS (ivs, REGNO (dest));
10392
10393 /* If this is the first set found, record it. */
10394 if (bl->init_insn == 0)
10395 {
10396 bl->init_insn = note_insn;
10397 bl->init_set = set;
10398 }
10399 }
10400 \f
10401 /* If any of the registers in X are "old" and currently have a last use earlier
10402 than INSN, update them to have a last use of INSN. Their actual last use
10403 will be the previous insn but it will not have a valid uid_luid so we can't
10404 use it. X must be a source expression only. */
10405
10406 static void
10407 update_reg_last_use (rtx x, rtx insn)
10408 {
10409 /* Check for the case where INSN does not have a valid luid. In this case,
10410 there is no need to modify the regno_last_uid, as this can only happen
10411 when code is inserted after the loop_end to set a pseudo's final value,
10412 and hence this insn will never be the last use of x.
10413 ???? This comment is not correct. See for example loop_givs_reduce.
10414 This may insert an insn before another new insn. */
10415 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10416 && INSN_UID (insn) < max_uid_for_loop
10417 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10418 {
10419 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10420 }
10421 else
10422 {
10423 int i, j;
10424 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10425 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10426 {
10427 if (fmt[i] == 'e')
10428 update_reg_last_use (XEXP (x, i), insn);
10429 else if (fmt[i] == 'E')
10430 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10431 update_reg_last_use (XVECEXP (x, i, j), insn);
10432 }
10433 }
10434 }
10435 \f
10436 /* Similar to rtlanal.c:get_condition, except that we also put an
10437 invariant last unless both operands are invariants. */
10438
10439 static rtx
10440 get_condition_for_loop (const struct loop *loop, rtx x)
10441 {
10442 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10443
10444 if (comparison == 0
10445 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10446 || loop_invariant_p (loop, XEXP (comparison, 1)))
10447 return comparison;
10448
10449 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10450 XEXP (comparison, 1), XEXP (comparison, 0));
10451 }
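/* Illustrative sketch, excluded from compilation (toy names, not the rtl
   swap_condition used above): exchanging the operands of a comparison only
   preserves its meaning if the comparison code is swapped as well, e.g.
   "a < b" becomes "b > a".  */
#if 0
enum toy_cond { TOY_LT, TOY_LE, TOY_GT, TOY_GE, TOY_EQ, TOY_NE };

/* Return the code C' such that (C' b a) is equivalent to (C a b).  */
static enum toy_cond
toy_swap_condition (enum toy_cond c)
{
  switch (c)
    {
    case TOY_LT: return TOY_GT;
    case TOY_LE: return TOY_GE;
    case TOY_GT: return TOY_LT;
    case TOY_GE: return TOY_LE;
    default:     return c;	/* EQ and NE are symmetric.  */
    }
}
#endif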
10452
10453 /* Scan the function and determine whether it has indirect (computed) jumps.
10454
10455 This is taken mostly from flow.c; similar code exists elsewhere
10456 in the compiler. It may be useful to put this into rtlanal.c. */
10457 static int
10458 indirect_jump_in_function_p (rtx start)
10459 {
10460 rtx insn;
10461
10462 for (insn = start; insn; insn = NEXT_INSN (insn))
10463 if (computed_jump_p (insn))
10464 return 1;
10465
10466 return 0;
10467 }
10468
10469 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10470 documentation for LOOP_MEMS for the definition of `appropriate'.
10471 This function is called from prescan_loop via for_each_rtx. */
10472
10473 static int
10474 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10475 {
10476 struct loop_info *loop_info = data;
10477 int i;
10478 rtx m = *mem;
10479
10480 if (m == NULL_RTX)
10481 return 0;
10482
10483 switch (GET_CODE (m))
10484 {
10485 case MEM:
10486 break;
10487
10488 case CLOBBER:
10489 /* We're not interested in MEMs that are only clobbered. */
10490 return -1;
10491
10492 case CONST_DOUBLE:
10493 /* We're not interested in the MEM associated with a
10494 CONST_DOUBLE, so there's no need to traverse into this. */
10495 return -1;
10496
10497 case EXPR_LIST:
10498 /* We're not interested in any MEMs that only appear in notes. */
10499 return -1;
10500
10501 default:
10502 /* This is not a MEM. */
10503 return 0;
10504 }
10505
10506 /* See if we've already seen this MEM. */
10507 for (i = 0; i < loop_info->mems_idx; ++i)
10508 if (rtx_equal_p (m, loop_info->mems[i].mem))
10509 {
10510 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10511 loop_info->mems[i].mem = m;
10512 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10513 /* The modes of the two memory accesses are different. If
10514 this happens, something tricky is going on, and we just
10515 don't optimize accesses to this MEM. */
10516 loop_info->mems[i].optimize = 0;
10517
10518 return 0;
10519 }
10520
10521 /* Resize the array, if necessary. */
10522 if (loop_info->mems_idx == loop_info->mems_allocated)
10523 {
10524 if (loop_info->mems_allocated != 0)
10525 loop_info->mems_allocated *= 2;
10526 else
10527 loop_info->mems_allocated = 32;
10528
10529 loop_info->mems = xrealloc (loop_info->mems,
10530 loop_info->mems_allocated * sizeof (loop_mem_info));
10531 }
10532
10533 /* Actually insert the MEM. */
10534 loop_info->mems[loop_info->mems_idx].mem = m;
10535 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10536 because we can't put it in a register. We still store it in the
10537 table, though, so that if we see the same address later, but in a
10538 non-BLK mode, we'll not think we can optimize it at that point. */
10539 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10540 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10541 ++loop_info->mems_idx;
10542
10543 return 0;
10544 }
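/* Illustrative sketch, excluded from compilation (toy names; error handling
   omitted): the table management above in stand-alone form, i.e. a linear
   duplicate scan followed by insertion into an array that starts at 32
   entries and doubles whenever it fills up.  */
#if 0
#include <stdlib.h>

struct toy_table
{
  int *elts;
  int n_elts;
  int allocated;
};

/* Record VAL in TABLE unless an equal element is already present.  */
static void
toy_table_insert (struct toy_table *table, int val)
{
  int i;

  for (i = 0; i < table->n_elts; i++)
    if (table->elts[i] == val)
      return;

  if (table->n_elts == table->allocated)
    {
      table->allocated = table->allocated ? table->allocated * 2 : 32;
      table->elts = realloc (table->elts,
			     table->allocated * sizeof (*table->elts));
    }

  table->elts[table->n_elts++] = val;
}
#endif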
10545
10546
10547 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10548
10549 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10550 register that is modified by an insn within the loop. If the
10551 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10552 more, stop incrementing it, to avoid overflow.
10553 
10554 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10555 register I is used, if it is used exactly once. Otherwise, it is
10556 set to 0 if register I is never used, or to const0_rtx if it is
10557 used more than once.
10558
10559 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10560 optimize register I. */
10561
10562 static void
10563 loop_regs_scan (const struct loop *loop, int extra_size)
10564 {
10565 struct loop_regs *regs = LOOP_REGS (loop);
10566 int old_nregs;
10567 /* last_set[n] is nonzero iff reg n has been set in the current
10568 basic block. In that case, it is the insn that last set reg n. */
10569 rtx *last_set;
10570 rtx insn;
10571 int i;
10572
10573 old_nregs = regs->num;
10574 regs->num = max_reg_num ();
10575
10576 /* Grow the regs array if not allocated or too small. */
10577 if (regs->num >= regs->size)
10578 {
10579 regs->size = regs->num + extra_size;
10580
10581 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10582
10583 /* Zero the new elements. */
10584 memset (regs->array + old_nregs, 0,
10585 (regs->size - old_nregs) * sizeof (*regs->array));
10586 }
10587
10588 /* Clear previously scanned fields but do not clear n_times_set. */
10589 for (i = 0; i < old_nregs; i++)
10590 {
10591 regs->array[i].set_in_loop = 0;
10592 regs->array[i].may_not_optimize = 0;
10593 regs->array[i].single_usage = NULL_RTX;
10594 }
10595
10596 last_set = xcalloc (regs->num, sizeof (rtx));
10597
10598 /* Scan the loop, recording register usage. */
10599 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10600 insn = NEXT_INSN (insn))
10601 {
10602 if (INSN_P (insn))
10603 {
10604 /* Record registers that have exactly one use. */
10605 find_single_use_in_loop (regs, insn, PATTERN (insn));
10606
10607 /* Include uses in REG_EQUAL notes. */
10608 if (REG_NOTES (insn))
10609 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10610
10611 if (GET_CODE (PATTERN (insn)) == SET
10612 || GET_CODE (PATTERN (insn)) == CLOBBER)
10613 count_one_set (regs, insn, PATTERN (insn), last_set);
10614 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10615 {
10616 int i;
10617 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10618 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10619 last_set);
10620 }
10621 }
10622
10623 if (LABEL_P (insn) || JUMP_P (insn))
10624 memset (last_set, 0, regs->num * sizeof (rtx));
10625
10626 /* Invalidate all registers used for function argument passing.
10627 We check rtx_varies_p for the same reason as below, to allow
10628 optimizing PIC calculations. */
10629 if (CALL_P (insn))
10630 {
10631 rtx link;
10632 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10633 link;
10634 link = XEXP (link, 1))
10635 {
10636 rtx op, reg;
10637
10638 if (GET_CODE (op = XEXP (link, 0)) == USE
10639 && REG_P (reg = XEXP (op, 0))
10640 && rtx_varies_p (reg, 1))
10641 regs->array[REGNO (reg)].may_not_optimize = 1;
10642 }
10643 }
10644 }
10645
10646 /* Invalidate all hard registers clobbered by calls. With one exception:
10647 a call-clobbered PIC register is still function-invariant for our
10648 purposes, since we can hoist any PIC calculations out of the loop.
10649 Thus the call to rtx_varies_p. */
10650 if (LOOP_INFO (loop)->has_call)
10651 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10652 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10653 && rtx_varies_p (regno_reg_rtx[i], 1))
10654 {
10655 regs->array[i].may_not_optimize = 1;
10656 regs->array[i].set_in_loop = 1;
10657 }
10658
10659 #ifdef AVOID_CCMODE_COPIES
10660 /* Don't try to move insns which set CC registers if we should not
10661 create CCmode register copies. */
10662 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10663 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10664 regs->array[i].may_not_optimize = 1;
10665 #endif
10666
10667 /* Set regs->array[I].n_times_set for the new registers. */
10668 for (i = old_nregs; i < regs->num; i++)
10669 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10670
10671 free (last_set);
10672 }
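/* Illustrative sketch, excluded from compilation (toy names): two pieces of
   bookkeeping used above, stand-alone.  The per-register set count
   saturates at 127 so it cannot overflow its narrow field, and the
   single-use slot is three-valued: a null pointer while no use has been
   seen, the using insn after exactly one use, and a distinguished marker
   (const0_rtx above) once more than one use has been seen.  */
#if 0
#include <stddef.h>

static char toy_many_uses;	/* Its address serves as the "many uses" marker.  */
#define TOY_MANY_USES ((void *) &toy_many_uses)

struct toy_reg_info
{
  signed char set_count;	/* Saturates at 127.  */
  void *single_use;		/* NULL, the single user, or TOY_MANY_USES.  */
};

static void
toy_note_set (struct toy_reg_info *info)
{
  if (info->set_count < 127)
    info->set_count++;
}

static void
toy_note_use (struct toy_reg_info *info, void *user)
{
  info->single_use = info->single_use == NULL ? user : TOY_MANY_USES;
}
#endif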
10673
10674 /* Returns the number of real INSNs in the LOOP. */
10675
10676 static int
10677 count_insns_in_loop (const struct loop *loop)
10678 {
10679 int count = 0;
10680 rtx insn;
10681
10682 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10683 insn = NEXT_INSN (insn))
10684 if (INSN_P (insn))
10685 ++count;
10686
10687 return count;
10688 }
10689
10690 /* Move MEMs into registers for the duration of the loop. */
10691
10692 static void
10693 load_mems (const struct loop *loop)
10694 {
10695 struct loop_info *loop_info = LOOP_INFO (loop);
10696 struct loop_regs *regs = LOOP_REGS (loop);
10697 int maybe_never = 0;
10698 int i;
10699 rtx p, prev_ebb_head;
10700 rtx label = NULL_RTX;
10701 rtx end_label;
10702 /* Nonzero if the next instruction may never be executed. */
10703 int next_maybe_never = 0;
10704 unsigned int last_max_reg = max_reg_num ();
10705
10706 if (loop_info->mems_idx == 0)
10707 return;
10708
10709 /* We cannot use next_label here because it skips over normal insns. */
10710 end_label = next_nonnote_insn (loop->end);
10711 if (end_label && !LABEL_P (end_label))
10712 end_label = NULL_RTX;
10713
10714 /* Check to see if it's possible that some instructions in the loop are
10715 never executed. Also check if there is a goto out of the loop other
10716 than right after the end of the loop. */
10717 for (p = next_insn_in_loop (loop, loop->scan_start);
10718 p != NULL_RTX;
10719 p = next_insn_in_loop (loop, p))
10720 {
10721 if (LABEL_P (p))
10722 maybe_never = 1;
10723 else if (JUMP_P (p)
10724 /* If we enter the loop in the middle, and scan
10725 around to the beginning, don't set maybe_never
10726 for that. This must be an unconditional jump,
10727 otherwise the code at the top of the loop might
10728 never be executed. Unconditional jumps are
10729 followed by a barrier and then the loop end. */
10730 && ! (JUMP_P (p)
10731 && JUMP_LABEL (p) == loop->top
10732 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10733 && any_uncondjump_p (p)))
10734 {
10735 /* If this is a jump outside of the loop but not right
10736 after the end of the loop, we would have to emit new fixup
10737 sequences for each such label. */
10738 if (/* If we can't tell where control might go when this
10739 JUMP_INSN is executed, we must be conservative. */
10740 !JUMP_LABEL (p)
10741 || (JUMP_LABEL (p) != end_label
10742 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10743 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10744 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10745 return;
10746
10747 if (!any_condjump_p (p))
10748 /* Something complicated. */
10749 maybe_never = 1;
10750 else
10751 /* If there are any more instructions in the loop, they
10752 might not be reached. */
10753 next_maybe_never = 1;
10754 }
10755 else if (next_maybe_never)
10756 maybe_never = 1;
10757 }
10758
10759 /* Find start of the extended basic block that enters the loop. */
10760 for (p = loop->start;
10761 PREV_INSN (p) && !LABEL_P (p);
10762 p = PREV_INSN (p))
10763 ;
10764 prev_ebb_head = p;
10765
10766 cselib_init (true);
10767
10768 /* Build table of mems that get set to constant values before the
10769 loop. */
10770 for (; p != loop->start; p = NEXT_INSN (p))
10771 cselib_process_insn (p);
10772
10773 /* Actually move the MEMs. */
10774 for (i = 0; i < loop_info->mems_idx; ++i)
10775 {
10776 regset_head load_copies;
10777 regset_head store_copies;
10778 int written = 0;
10779 rtx reg;
10780 rtx mem = loop_info->mems[i].mem;
10781 rtx mem_list_entry;
10782
10783 if (MEM_VOLATILE_P (mem)
10784 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10785 /* There's no telling whether or not MEM is modified. */
10786 loop_info->mems[i].optimize = 0;
10787
10788 /* Go through the MEMs written to in the loop to see if this
10789 one is aliased by one of them. */
10790 mem_list_entry = loop_info->store_mems;
10791 while (mem_list_entry)
10792 {
10793 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10794 written = 1;
10795 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10796 mem, rtx_varies_p))
10797 {
10798 /* MEM is indeed aliased by this store. */
10799 loop_info->mems[i].optimize = 0;
10800 break;
10801 }
10802 mem_list_entry = XEXP (mem_list_entry, 1);
10803 }
10804
10805 if (flag_float_store && written
10806 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10807 loop_info->mems[i].optimize = 0;
10808
10809 /* If this MEM is written to, we must be sure that there
10810 are no reads from another MEM that aliases this one. */
10811 if (loop_info->mems[i].optimize && written)
10812 {
10813 int j;
10814
10815 for (j = 0; j < loop_info->mems_idx; ++j)
10816 {
10817 if (j == i)
10818 continue;
10819 else if (true_dependence (mem,
10820 VOIDmode,
10821 loop_info->mems[j].mem,
10822 rtx_varies_p))
10823 {
10824 /* It's not safe to hoist loop_info->mems[i] out of
10825 the loop because writes to it might not be
10826 seen by reads from loop_info->mems[j]. */
10827 loop_info->mems[i].optimize = 0;
10828 break;
10829 }
10830 }
10831 }
10832
10833 if (maybe_never && may_trap_p (mem))
10834 /* We can't access the MEM outside the loop; it might
10835 cause a trap that wouldn't have happened otherwise. */
10836 loop_info->mems[i].optimize = 0;
10837
10838 if (!loop_info->mems[i].optimize)
10839 /* We thought we were going to lift this MEM out of the
10840 loop, but later discovered that we could not. */
10841 continue;
10842
10843 INIT_REG_SET (&load_copies);
10844 INIT_REG_SET (&store_copies);
10845
10846 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10847 order to keep scan_loop from moving stores to this MEM
10848 out of the loop just because this REG is neither a
10849 user-variable nor used in the loop test. */
10850 reg = gen_reg_rtx (GET_MODE (mem));
10851 REG_USERVAR_P (reg) = 1;
10852 loop_info->mems[i].reg = reg;
10853
10854 /* Now, replace all references to the MEM with the
10855 corresponding pseudos. */
10856 maybe_never = 0;
10857 for (p = next_insn_in_loop (loop, loop->scan_start);
10858 p != NULL_RTX;
10859 p = next_insn_in_loop (loop, p))
10860 {
10861 if (INSN_P (p))
10862 {
10863 rtx set;
10864
10865 set = single_set (p);
10866
10867 /* See if this copies the mem into a register that isn't
10868 modified afterwards. We'll try to do copy propagation
10869 a little further on. */
10870 if (set
10871 /* @@@ This test is _way_ too conservative. */
10872 && ! maybe_never
10873 && REG_P (SET_DEST (set))
10874 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10875 && REGNO (SET_DEST (set)) < last_max_reg
10876 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10877 && rtx_equal_p (SET_SRC (set), mem))
10878 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10879
10880 /* See if this copies the mem from a register that isn't
10881 modified afterwards. We'll try to remove the
10882 redundant copy later on by doing a little register
10883 renaming and copy propagation. This will help
10884 to untangle things for the BIV detection code. */
10885 if (set
10886 && ! maybe_never
10887 && REG_P (SET_SRC (set))
10888 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10889 && REGNO (SET_SRC (set)) < last_max_reg
10890 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10891 && rtx_equal_p (SET_DEST (set), mem))
10892 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10893
10894 /* If this is a call which uses / clobbers this memory
10895 location, we must not change the interface here. */
10896 if (CALL_P (p)
10897 && reg_mentioned_p (loop_info->mems[i].mem,
10898 CALL_INSN_FUNCTION_USAGE (p)))
10899 {
10900 cancel_changes (0);
10901 loop_info->mems[i].optimize = 0;
10902 break;
10903 }
10904 else
10905 /* Replace the memory reference with the shadow register. */
10906 replace_loop_mems (p, loop_info->mems[i].mem,
10907 loop_info->mems[i].reg, written);
10908 }
10909
10910 if (LABEL_P (p)
10911 || JUMP_P (p))
10912 maybe_never = 1;
10913 }
10914
10915 if (! loop_info->mems[i].optimize)
10916 ; /* We found we couldn't do the replacement, so do nothing. */
10917 else if (! apply_change_group ())
10918 /* We couldn't replace all occurrences of the MEM. */
10919 loop_info->mems[i].optimize = 0;
10920 else
10921 {
10922 /* Load the memory immediately before LOOP->START, which is
10923 the NOTE_LOOP_BEG. */
10924 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10925 rtx set;
10926 rtx best = mem;
10927 unsigned j;
10928 struct elt_loc_list *const_equiv = 0;
10929 reg_set_iterator rsi;
10930
10931 if (e)
10932 {
10933 struct elt_loc_list *equiv;
10934 struct elt_loc_list *best_equiv = 0;
10935 for (equiv = e->locs; equiv; equiv = equiv->next)
10936 {
10937 if (CONSTANT_P (equiv->loc))
10938 const_equiv = equiv;
10939 else if (REG_P (equiv->loc)
10940 /* Extending hard register lifetimes causes crashes
10941 on SRC targets. Doing so on non-SRC targets is
10942 probably also not a good idea, since we most
10943 likely have a pseudo-register equivalence as
10944 well. */
10945 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10946 best_equiv = equiv;
10947 }
10948 /* Use the constant equivalence if that is cheap enough. */
10949 if (! best_equiv)
10950 best_equiv = const_equiv;
10951 else if (const_equiv
10952 && (rtx_cost (const_equiv->loc, SET)
10953 <= rtx_cost (best_equiv->loc, SET)))
10954 {
10955 best_equiv = const_equiv;
10956 const_equiv = 0;
10957 }
10958
10959 /* If best_equiv is nonzero, we know that MEM is set to a
10960 constant or register before the loop. We will use this
10961 knowledge to initialize the shadow register with that
10962 constant or reg rather than by loading from MEM. */
10963 if (best_equiv)
10964 best = copy_rtx (best_equiv->loc);
10965 }
10966
10967 set = gen_move_insn (reg, best);
10968 set = loop_insn_hoist (loop, set);
10969 if (REG_P (best))
10970 {
10971 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10972 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10973 {
10974 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10975 break;
10976 }
10977 }
10978
10979 if (const_equiv)
10980 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10981
10982 if (written)
10983 {
10984 if (label == NULL_RTX)
10985 {
10986 label = gen_label_rtx ();
10987 emit_label_after (label, loop->end);
10988 }
10989
10990 /* Store the memory immediately after END, which is
10991 the NOTE_LOOP_END. */
10992 set = gen_move_insn (copy_rtx (mem), reg);
10993 loop_insn_emit_after (loop, 0, label, set);
10994 }
10995
10996 if (loop_dump_stream)
10997 {
10998 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10999 REGNO (reg), (written ? "r/w" : "r/o"));
11000 print_rtl (loop_dump_stream, mem);
11001 fputc ('\n', loop_dump_stream);
11002 }
11003
11004 /* Attempt a bit of copy propagation. This helps untangle the
11005 data flow, and enables {basic,general}_induction_var to find
11006 more bivs/givs. */
11007 EXECUTE_IF_SET_IN_REG_SET
11008 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11009 {
11010 try_copy_prop (loop, reg, j);
11011 }
11012 CLEAR_REG_SET (&load_copies);
11013
11014 EXECUTE_IF_SET_IN_REG_SET
11015 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11016 {
11017 try_swap_copy_prop (loop, reg, j);
11018 }
11019 CLEAR_REG_SET (&store_copies);
11020 }
11021 }
11022
11023 /* Now, we need to replace all references to the previous exit
11024 label with the new one. */
11025 if (label != NULL_RTX && end_label != NULL_RTX)
11026 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11027 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11028 redirect_jump (p, label, false);
11029
11030 cselib_finish ();
11031 }
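/* Illustrative sketch, excluded from compilation (hypothetical functions):
   at the source level, the transformation load_mems performs corresponds
   roughly to rewriting the first function below into the second.  It is
   valid only when the hoisted memory location is not aliased by anything
   else the loop reads or writes and the hoisted access cannot trap when
   the loop body is never reached.  */
#if 0
static void
toy_before (int *p, const int *src, int n)
{
  int i;

  for (i = 0; i < n; i++)
    *p += src[i];		/* MEM referenced on every iteration.  */
}

static void
toy_after (int *p, const int *src, int n)
{
  int i;
  int shadow = *p;		/* Load hoisted into the pre-header.  */

  for (i = 0; i < n; i++)
    shadow += src[i];		/* Loop body uses the shadow register.  */

  *p = shadow;			/* Store emitted once after the loop end.  */
}
#endif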
11032
11033 /* For communication between note_reg_stored and its caller. */
11034 struct note_reg_stored_arg
11035 {
11036 int set_seen;
11037 rtx reg;
11038 };
11039
11040 /* Called via note_stores, record in SET_SEEN whether X, which is written,
11041 is equal to ARG. */
11042 static void
11043 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11044 {
11045 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11046 if (t->reg == x)
11047 t->set_seen = 1;
11048 }
11049
11050 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11051 There must be exactly one insn that sets this pseudo; it will be
11052 deleted if all replacements succeed and we can prove that the register
11053 is not used after the loop. */
11054
11055 static void
11056 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11057 {
11058 /* This is the reg that we are copying from. */
11059 rtx reg_rtx = regno_reg_rtx[regno];
11060 rtx init_insn = 0;
11061 rtx insn;
11062 /* These help keep track of whether we replaced all uses of the reg. */
11063 int replaced_last = 0;
11064 int store_is_first = 0;
11065
11066 for (insn = next_insn_in_loop (loop, loop->scan_start);
11067 insn != NULL_RTX;
11068 insn = next_insn_in_loop (loop, insn))
11069 {
11070 rtx set;
11071
11072 /* Only substitute within one extended basic block from the initializing
11073 insn. */
11074 if (LABEL_P (insn) && init_insn)
11075 break;
11076
11077 if (! INSN_P (insn))
11078 continue;
11079
11080 /* Is this the initializing insn? */
11081 set = single_set (insn);
11082 if (set
11083 && REG_P (SET_DEST (set))
11084 && REGNO (SET_DEST (set)) == regno)
11085 {
11086 gcc_assert (!init_insn);
11087
11088 init_insn = insn;
11089 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11090 store_is_first = 1;
11091 }
11092
11093 /* Only substitute after seeing the initializing insn. */
11094 if (init_insn && insn != init_insn)
11095 {
11096 struct note_reg_stored_arg arg;
11097
11098 replace_loop_regs (insn, reg_rtx, replacement);
11099 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11100 replaced_last = 1;
11101
11102 /* Stop replacing when REPLACEMENT is modified. */
11103 arg.reg = replacement;
11104 arg.set_seen = 0;
11105 note_stores (PATTERN (insn), note_reg_stored, &arg);
11106 if (arg.set_seen)
11107 {
11108 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11109
11110 /* It is possible that we've turned a previously valid REG_EQUAL note
11111 into an invalid one: we changed REGNO to REPLACEMENT, and unlike
11112 REGNO, REPLACEMENT is modified here, so the note no longer holds. */
11113 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11114 remove_note (insn, note);
11115 break;
11116 }
11117 }
11118 }
11119 gcc_assert (init_insn);
11120 if (apply_change_group ())
11121 {
11122 if (loop_dump_stream)
11123 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11124 if (store_is_first && replaced_last)
11125 {
11126 rtx first;
11127 rtx retval_note;
11128
11129 /* Assume we're just deleting INIT_INSN. */
11130 first = init_insn;
11131 /* Look for REG_RETVAL note. If we're deleting the end of
11132 the libcall sequence, the whole sequence can go. */
11133 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11134 /* If we found a REG_RETVAL note, find the first instruction
11135 in the sequence. */
11136 if (retval_note)
11137 first = XEXP (retval_note, 0);
11138
11139 /* Delete the instructions. */
11140 loop_delete_insns (first, init_insn);
11141 }
11142 if (loop_dump_stream)
11143 fprintf (loop_dump_stream, ".\n");
11144 }
11145 }
11146
11147 /* Replace all the instructions from FIRST up to and including LAST
11148 with NOTE_INSN_DELETED notes. */
11149
11150 static void
11151 loop_delete_insns (rtx first, rtx last)
11152 {
11153 while (1)
11154 {
11155 if (loop_dump_stream)
11156 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
11157 INSN_UID (first));
11158 delete_insn (first);
11159
11160 /* If this was the LAST instruction we're supposed to delete,
11161 we're done. */
11162 if (first == last)
11163 break;
11164
11165 first = NEXT_INSN (first);
11166 }
11167 }
11168
11169 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11170 loop LOOP if the order of the sets of these registers can be
11171 swapped. There must be exactly one insn within the loop that sets
11172 this pseudo, followed immediately by a move insn that copies
11173 REGNO to REPLACEMENT. */
11174 static void
11175 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11176 unsigned int regno)
11177 {
11178 rtx insn;
11179 rtx set = NULL_RTX;
11180 unsigned int new_regno;
11181
11182 new_regno = REGNO (replacement);
11183
11184 for (insn = next_insn_in_loop (loop, loop->scan_start);
11185 insn != NULL_RTX;
11186 insn = next_insn_in_loop (loop, insn))
11187 {
11188 /* Search for the insn that copies REGNO to NEW_REGNO. */
11189 if (INSN_P (insn)
11190 && (set = single_set (insn))
11191 && REG_P (SET_DEST (set))
11192 && REGNO (SET_DEST (set)) == new_regno
11193 && REG_P (SET_SRC (set))
11194 && REGNO (SET_SRC (set)) == regno)
11195 break;
11196 }
11197
11198 if (insn != NULL_RTX)
11199 {
11200 rtx prev_insn;
11201 rtx prev_set;
11202
11203 /* Some DEF-USE info would come in handy here to make this
11204 function more general. For now, just check the previous insn
11205 which is the most likely candidate for setting REGNO. */
11206
11207 prev_insn = PREV_INSN (insn);
11208
11209 if (INSN_P (insn)
11210 && (prev_set = single_set (prev_insn))
11211 && REG_P (SET_DEST (prev_set))
11212 && REGNO (SET_DEST (prev_set)) == regno)
11213 {
11214 /* We have:
11215 (set (reg regno) (expr))
11216 (set (reg new_regno) (reg regno))
11217
11218 so try converting this to:
11219 (set (reg new_regno) (expr))
11220 (set (reg regno) (reg new_regno))
11221
11222 The former construct is often generated when a global
11223 variable used for an induction variable is shadowed by a
11224 register (NEW_REGNO). The latter construct improves the
11225 chances of GIV replacement and BIV elimination. */
11226
11227 validate_change (prev_insn, &SET_DEST (prev_set),
11228 replacement, 1);
11229 validate_change (insn, &SET_DEST (set),
11230 SET_SRC (set), 1);
11231 validate_change (insn, &SET_SRC (set),
11232 replacement, 1);
11233
11234 if (apply_change_group ())
11235 {
11236 if (loop_dump_stream)
11237 fprintf (loop_dump_stream,
11238 " Swapped set of reg %d at %d with reg %d at %d.\n",
11239 regno, INSN_UID (insn),
11240 new_regno, INSN_UID (prev_insn));
11241
11242 /* Update first use of REGNO. */
11243 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11244 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11245
11246 /* Now perform copy propagation to hopefully
11247 remove all uses of REGNO within the loop. */
11248 try_copy_prop (loop, replacement, regno);
11249 }
11250 }
11251 }
11252 }
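/* Illustrative sketch, excluded from compilation (toy names; toy_expr_value
   is a hypothetical stand-in for the original expression): the swap
   documented above, shown as straight-line C.  Computing directly into the
   shadow and copying back leaves the loop body using the shadow register,
   which is what improves the chances of the later copy propagation and
   biv elimination.  */
#if 0
extern int toy_expr_value (void);

static int toy_regno, toy_new_regno;

static void
toy_before_swap (void)
{
  toy_regno = toy_expr_value ();	/* (set (reg regno) (expr))          */
  toy_new_regno = toy_regno;		/* (set (reg new_regno) (reg regno)) */
}

static void
toy_after_swap (void)
{
  toy_new_regno = toy_expr_value ();	/* (set (reg new_regno) (expr))      */
  toy_regno = toy_new_regno;		/* (set (reg regno) (reg new_regno)) */
}
#endif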
11253
11254 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11255
11256 static int
11257 find_mem_in_note_1 (rtx *x, void *data)
11258 {
11259 if (*x != NULL_RTX && MEM_P (*x))
11260 {
11261 rtx *res = (rtx *) data;
11262 *res = *x;
11263 return 1;
11264 }
11265 return 0;
11266 }
11267
11268 /* Returns the first MEM found in NOTE by depth-first search. */
11269
11270 static rtx
11271 find_mem_in_note (rtx note)
11272 {
11273 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11274 return note;
11275 return NULL_RTX;
11276 }
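/* Illustrative sketch, excluded from compilation (toy names): callbacks
   handed to for_each_rtx, such as find_mem_in_note_1 above and
   replace_loop_mem below, follow a small protocol: return 0 to keep
   walking, a negative value to skip the sub-expressions of the current
   node, or a positive value to abort the whole walk, in which case
   for_each_rtx returns that value.  The same protocol over a toy binary
   tree: */
#if 0
#include <stddef.h>

struct toy_node
{
  int value;
  struct toy_node *left, *right;
};

/* Apply FN to each node of NODE depth-first, honoring the return protocol
   described above.  Return the first positive value FN produces, else 0.  */
static int
toy_for_each (struct toy_node *node, int (*fn) (struct toy_node *, void *),
	      void *data)
{
  int ret;

  if (node == NULL)
    return 0;

  ret = fn (node, data);
  if (ret > 0)
    return ret;
  if (ret < 0)
    return 0;			/* Skip this node's children only.  */

  ret = toy_for_each (node->left, fn, data);
  if (ret > 0)
    return ret;

  return toy_for_each (node->right, fn, data);
}
#endif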
11277
11278 /* Replace MEM with its associated pseudo register. This function is
11279 called from load_mems via for_each_rtx. DATA is actually a pointer
11280 to a structure describing the instruction currently being scanned
11281 and the MEM we are currently replacing. */
11282
11283 static int
11284 replace_loop_mem (rtx *mem, void *data)
11285 {
11286 loop_replace_args *args = (loop_replace_args *) data;
11287 rtx m = *mem;
11288
11289 if (m == NULL_RTX)
11290 return 0;
11291
11292 switch (GET_CODE (m))
11293 {
11294 case MEM:
11295 break;
11296
11297 case CONST_DOUBLE:
11298 /* We're not interested in the MEM associated with a
11299 CONST_DOUBLE, so there's no need to traverse into one. */
11300 return -1;
11301
11302 default:
11303 /* This is not a MEM. */
11304 return 0;
11305 }
11306
11307 if (!rtx_equal_p (args->match, m))
11308 /* This is not the MEM we are currently replacing. */
11309 return 0;
11310
11311 /* Actually replace the MEM. */
11312 validate_change (args->insn, mem, args->replacement, 1);
11313
11314 return 0;
11315 }
11316
11317 static void
11318 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11319 {
11320 loop_replace_args args;
11321
11322 args.insn = insn;
11323 args.match = mem;
11324 args.replacement = reg;
11325
11326 for_each_rtx (&insn, replace_loop_mem, &args);
11327
11328 /* If we hoist a mem write out of the loop, then REG_EQUAL
11329 notes referring to the mem are no longer valid. */
11330 if (written)
11331 {
11332 rtx note, sub;
11333 rtx *link;
11334
11335 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11336 {
11337 if (REG_NOTE_KIND (note) == REG_EQUAL
11338 && (sub = find_mem_in_note (note))
11339 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11340 {
11341 /* Remove the note. */
11342 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11343 break;
11344 }
11345 }
11346 }
11347 }
11348
11349 /* Replace one register with another. Called through for_each_rtx; PX points
11350 to the rtx being scanned. DATA is actually a pointer to
11351 a structure of arguments. */
11352
11353 static int
11354 replace_loop_reg (rtx *px, void *data)
11355 {
11356 rtx x = *px;
11357 loop_replace_args *args = (loop_replace_args *) data;
11358
11359 if (x == NULL_RTX)
11360 return 0;
11361
11362 if (x == args->match)
11363 validate_change (args->insn, px, args->replacement, 1);
11364
11365 return 0;
11366 }
11367
11368 static void
11369 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11370 {
11371 loop_replace_args args;
11372
11373 args.insn = insn;
11374 args.match = reg;
11375 args.replacement = replacement;
11376
11377 for_each_rtx (&insn, replace_loop_reg, &args);
11378 }
11379 \f
11380 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11381 (ignored in the interim). */
11382
11383 static rtx
11384 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11385 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11386 rtx pattern)
11387 {
11388 return emit_insn_after (pattern, where_insn);
11389 }
11390
11391
11392 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
11393 in basic block WHERE_BB (ignored in the interim) within the loop;
11394 otherwise, hoist PATTERN into the loop pre-header. */
11395
11396 static rtx
11397 loop_insn_emit_before (const struct loop *loop,
11398 basic_block where_bb ATTRIBUTE_UNUSED,
11399 rtx where_insn, rtx pattern)
11400 {
11401 if (! where_insn)
11402 return loop_insn_hoist (loop, pattern);
11403 return emit_insn_before (pattern, where_insn);
11404 }
11405
11406
11407 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11408 WHERE_BB (ignored in the interim) within the loop. */
11409
11410 static rtx
11411 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11412 basic_block where_bb ATTRIBUTE_UNUSED,
11413 rtx where_insn, rtx pattern)
11414 {
11415 return emit_call_insn_before (pattern, where_insn);
11416 }
11417
11418
11419 /* Hoist insn for PATTERN into the loop pre-header. */
11420
11421 static rtx
11422 loop_insn_hoist (const struct loop *loop, rtx pattern)
11423 {
11424 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11425 }
11426
11427
11428 /* Hoist call insn for PATTERN into the loop pre-header. */
11429
11430 static rtx
11431 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11432 {
11433 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11434 }
11435
11436
11437 /* Sink insn for PATTERN after the loop end. */
11438
11439 static rtx
11440 loop_insn_sink (const struct loop *loop, rtx pattern)
11441 {
11442 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11443 }
11444
11445 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
11446 and a constant. Emit a sequence of instructions to load it into REG. */
11447 static rtx
11448 gen_load_of_final_value (rtx reg, rtx final_value)
11449 {
11450 rtx seq;
11451 start_sequence ();
11452 final_value = force_operand (final_value, reg);
11453 if (final_value != reg)
11454 emit_move_insn (reg, final_value);
11455 seq = get_insns ();
11456 end_sequence ();
11457 return seq;
11458 }
11459
11460 /* If the loop has multiple exits, emit insn for PATTERN before the
11461 loop to ensure that it will always be executed no matter how the
11462 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11463 since this is slightly more efficient. */
11464
11465 static rtx
11466 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11467 {
11468 if (loop->exit_count)
11469 return loop_insn_hoist (loop, pattern);
11470 else
11471 return loop_insn_sink (loop, pattern);
11472 }
11473 \f
11474 static void
11475 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11476 {
11477 struct iv_class *bl;
11478 int iv_num = 0;
11479
11480 if (! loop || ! file)
11481 return;
11482
11483 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11484 iv_num++;
11485
11486 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11487
11488 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11489 {
11490 loop_iv_class_dump (bl, file, verbose);
11491 fputc ('\n', file);
11492 }
11493 }
11494
11495
11496 static void
11497 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11498 int verbose ATTRIBUTE_UNUSED)
11499 {
11500 struct induction *v;
11501 rtx incr;
11502 int i;
11503
11504 if (! bl || ! file)
11505 return;
11506
11507 fprintf (file, "IV class for reg %d, benefit %d\n",
11508 bl->regno, bl->total_benefit);
11509
11510 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11511 if (bl->initial_value)
11512 {
11513 fprintf (file, ", init val: ");
11514 print_simple_rtl (file, bl->initial_value);
11515 }
11516 if (bl->initial_test)
11517 {
11518 fprintf (file, ", init test: ");
11519 print_simple_rtl (file, bl->initial_test);
11520 }
11521 fputc ('\n', file);
11522
11523 if (bl->final_value)
11524 {
11525 fprintf (file, " Final val: ");
11526 print_simple_rtl (file, bl->final_value);
11527 fputc ('\n', file);
11528 }
11529
11530 if ((incr = biv_total_increment (bl)))
11531 {
11532 fprintf (file, " Total increment: ");
11533 print_simple_rtl (file, incr);
11534 fputc ('\n', file);
11535 }
11536
11537 /* List the increments. */
11538 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11539 {
11540 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11541 print_simple_rtl (file, v->add_val);
11542 fputc ('\n', file);
11543 }
11544
11545 /* List the givs. */
11546 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11547 {
11548 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11549 i, INSN_UID (v->insn), v->benefit);
11550 if (v->giv_type == DEST_ADDR)
11551 print_simple_rtl (file, v->mem);
11552 else
11553 print_simple_rtl (file, single_set (v->insn));
11554 fputc ('\n', file);
11555 }
11556 }
11557
11558
11559 static void
11560 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11561 {
11562 if (! v || ! file)
11563 return;
11564
11565 fprintf (file,
11566 "Biv %d: insn %d",
11567 REGNO (v->dest_reg), INSN_UID (v->insn));
11568 fprintf (file, " const ");
11569 print_simple_rtl (file, v->add_val);
11570
11571 if (verbose && v->final_value)
11572 {
11573 fputc ('\n', file);
11574 fprintf (file, " final ");
11575 print_simple_rtl (file, v->final_value);
11576 }
11577
11578 fputc ('\n', file);
11579 }
11580
11581
11582 static void
11583 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11584 {
11585 if (! v || ! file)
11586 return;
11587
11588 if (v->giv_type == DEST_REG)
11589 fprintf (file, "Giv %d: insn %d",
11590 REGNO (v->dest_reg), INSN_UID (v->insn));
11591 else
11592 fprintf (file, "Dest address: insn %d",
11593 INSN_UID (v->insn));
11594
11595 fprintf (file, " src reg %d benefit %d",
11596 REGNO (v->src_reg), v->benefit);
11597 fprintf (file, " lifetime %d",
11598 v->lifetime);
11599
11600 if (v->replaceable)
11601 fprintf (file, " replaceable");
11602
11603 if (v->no_const_addval)
11604 fprintf (file, " ncav");
11605
11606 if (v->ext_dependent)
11607 {
11608 switch (GET_CODE (v->ext_dependent))
11609 {
11610 case SIGN_EXTEND:
11611 fprintf (file, " ext se");
11612 break;
11613 case ZERO_EXTEND:
11614 fprintf (file, " ext ze");
11615 break;
11616 case TRUNCATE:
11617 fprintf (file, " ext tr");
11618 break;
11619 default:
11620 gcc_unreachable ();
11621 }
11622 }
11623
11624 fputc ('\n', file);
11625 fprintf (file, " mult ");
11626 print_simple_rtl (file, v->mult_val);
11627
11628 fputc ('\n', file);
11629 fprintf (file, " add ");
11630 print_simple_rtl (file, v->add_val);
11631
11632 if (verbose && v->final_value)
11633 {
11634 fputc ('\n', file);
11635 fprintf (file, " final ");
11636 print_simple_rtl (file, v->final_value);
11637 }
11638
11639 fputc ('\n', file);
11640 }
11641
11642
11643 void
11644 debug_ivs (const struct loop *loop)
11645 {
11646 loop_ivs_dump (loop, stderr, 1);
11647 }
11648
11649
11650 void
11651 debug_iv_class (const struct iv_class *bl)
11652 {
11653 loop_iv_class_dump (bl, stderr, 1);
11654 }
11655
11656
11657 void
11658 debug_biv (const struct induction *v)
11659 {
11660 loop_biv_dump (v, stderr, 1);
11661 }
11662
11663
11664 void
11665 debug_giv (const struct induction *v)
11666 {
11667 loop_giv_dump (v, stderr, 1);
11668 }
11669
11670
11671 #define LOOP_BLOCK_NUM_1(INSN) \
11672 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
11673
11674 /* The notes do not have an assigned block, so look at the next insn. */
11675 #define LOOP_BLOCK_NUM(INSN) \
11676 ((INSN) ? (NOTE_P (INSN) \
11677 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11678 : LOOP_BLOCK_NUM_1 (INSN)) \
11679 : -1)
11680
11681 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
11682
11683 static void
11684 loop_dump_aux (const struct loop *loop, FILE *file,
11685 int verbose ATTRIBUTE_UNUSED)
11686 {
11687 rtx label;
11688
11689 if (! loop || ! file || !BB_HEAD (loop->first))
11690 return;
11691
11692 /* Print diagnostics to compare our concept of a loop with
11693 what the loop notes say. */
11694 if (! PREV_INSN (BB_HEAD (loop->first))
11695 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11696 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11697 != NOTE_INSN_LOOP_BEG)
11698 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11699 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11700 if (! NEXT_INSN (BB_END (loop->last))
11701 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11702 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11703 != NOTE_INSN_LOOP_END)
11704 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11705 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11706
11707 if (loop->start)
11708 {
11709 fprintf (file,
11710 ";; start %d (%d), end %d (%d)\n",
11711 LOOP_BLOCK_NUM (loop->start),
11712 LOOP_INSN_UID (loop->start),
11713 LOOP_BLOCK_NUM (loop->end),
11714 LOOP_INSN_UID (loop->end));
11715 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11716 LOOP_BLOCK_NUM (loop->top),
11717 LOOP_INSN_UID (loop->top),
11718 LOOP_BLOCK_NUM (loop->scan_start),
11719 LOOP_INSN_UID (loop->scan_start));
11720 fprintf (file, ";; exit_count %d", loop->exit_count);
11721 if (loop->exit_count)
11722 {
11723 fputs (", labels:", file);
11724 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11725 {
11726 fprintf (file, " %d ",
11727 LOOP_INSN_UID (XEXP (label, 0)));
11728 }
11729 }
11730 fputs ("\n", file);
11731 }
11732 }
11733
11734 /* Call this function from the debugger to dump LOOP. */
11735
11736 void
11737 debug_loop (const struct loop *loop)
11738 {
11739 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11740 }
11741
11742 /* Call this function from the debugger to dump LOOPS. */
11743
11744 void
11745 debug_loops (const struct loops *loops)
11746 {
11747 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
11748 }
11749 \f
11750 static bool
11751 gate_handle_loop_optimize (void)
11752 {
11753 return (optimize > 0 && flag_loop_optimize);
11754 }
11755
11756 /* Move constant computations out of loops. */
11757 static void
11758 rest_of_handle_loop_optimize (void)
11759 {
11760 int do_prefetch;
11761
11762 /* CFG is no longer maintained up-to-date. */
11763 free_bb_for_insn ();
11764 profile_status = PROFILE_ABSENT;
11765
11766 do_prefetch = flag_prefetch_loop_arrays ? LOOP_PREFETCH : 0;
11767
11768 if (flag_rerun_loop_opt)
11769 {
11770 cleanup_barriers ();
11771
11772 /* We only want to perform unrolling once. */
11773 loop_optimize (get_insns (), dump_file, 0);
11774
11775 /* The first call to loop_optimize makes some instructions
11776 trivially dead. We delete those instructions now in the
11777 hope that doing so will make the heuristics in loop work
11778 better and possibly speed up compilation. */
11779 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11780
11781 /* The regscan pass is currently necessary as the alias
11782 analysis code depends on this information. */
11783 reg_scan (get_insns (), max_reg_num ());
11784 }
11785 cleanup_barriers ();
11786 loop_optimize (get_insns (), dump_file, do_prefetch);
11787
11788 /* Loop can create trivially dead instructions. */
11789 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11790 find_basic_blocks (get_insns ());
11791 }
11792
11793 struct tree_opt_pass pass_loop_optimize =
11794 {
11795 "old-loop", /* name */
11796 gate_handle_loop_optimize, /* gate */
11797 rest_of_handle_loop_optimize, /* execute */
11798 NULL, /* sub */
11799 NULL, /* next */
11800 0, /* static_pass_number */
11801 TV_LOOP, /* tv_id */
11802 0, /* properties_required */
11803 0, /* properties_provided */
11804 0, /* properties_destroyed */
11805 0, /* todo_flags_start */
11806 TODO_dump_func |
11807 TODO_ggc_collect, /* todo_flags_finish */
11808 'L' /* letter */
11809 };
11810
11811