1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
3 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* This is the loop optimization pass of the compiler.
24 It finds invariant computations within loops and moves them
25 to the beginning of the loop. Then it identifies basic and
26 general induction variables.
27
28	Basic induction variables (BIVs) are pseudo registers that are set within
29	a loop only by incrementing or decrementing their value.  General induction
30	variables (GIVs) are pseudo registers whose value is a linear function
31	of a basic induction variable.  BIVs are recognized by `basic_induction_var';
32	GIVs by `general_induction_var'.
33
34 Once induction variables are identified, strength reduction is applied to the
35 general induction variables, and induction variable elimination is applied to
36 the basic induction variables.
37
38 It also finds cases where
39 a register is set within the loop by zero-extending a narrower value
40 and changes these to zero the entire register once before the loop
41 and merely copy the low part within the loop.
42
43	Most of the complexity is in heuristics to decide when it is
44	worthwhile to do these things.  */
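/* As an illustrative sketch (not code from this pass), consider a source
   loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   The counter `i' is a biv, since it is changed only by the increment i++.
   The address computation `a + i * 4' (assuming 4-byte elements) is a giv,
   since it is a linear function of that biv.  Strength reduction gives the
   giv its own pseudo register which is simply incremented by 4 on each
   iteration, replacing the multiplication implied by the indexing.  */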
45
46 #include "config.h"
47 #include "system.h"
48 #include "coretypes.h"
49 #include "tm.h"
50 #include "rtl.h"
51 #include "tm_p.h"
52 #include "function.h"
53 #include "expr.h"
54 #include "hard-reg-set.h"
55 #include "basic-block.h"
56 #include "insn-config.h"
57 #include "regs.h"
58 #include "recog.h"
59 #include "flags.h"
60 #include "real.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
68 #include "ggc.h"
69 #include "timevar.h"
70 #include "tree-pass.h"
71
72 /* Get the loop info pointer of a loop. */
73 #define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)
74
75 /* Get a pointer to the loop movables structure. */
76 #define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)
77
78 /* Get a pointer to the loop registers structure. */
79 #define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)
80
81 /* Get a pointer to the loop induction variables structure. */
82 #define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)
83
84 /* Get the luid of an insn. Catch the error of trying to reference the LUID
85	of an insn added during the loop pass, since these don't have LUIDs.  */
86
87 #define INSN_LUID(INSN) \
88 (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])
89
90 #define REGNO_FIRST_LUID(REGNO) \
91 (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
92 ? uid_luid[REGNO_FIRST_UID (REGNO)] \
93 : 0)
94 #define REGNO_LAST_LUID(REGNO) \
95 (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
96 ? uid_luid[REGNO_LAST_UID (REGNO)] \
97 : INT_MAX)
98
99 /* A "basic induction variable" or biv is a pseudo reg that is set
100 (within this loop) only by incrementing or decrementing it. */
101 /* A "general induction variable" or giv is a pseudo reg whose
102 value is a linear function of a biv. */
103
104 /* Bivs are recognized by `basic_induction_var';
105 Givs by `general_induction_var'. */
106
107 /* An enum for the two different types of givs, those that are used
108 as memory addresses and those that are calculated into registers. */
109 enum g_types
110 {
111 DEST_ADDR,
112 DEST_REG
113 };
114
115
116 /* A `struct induction' is created for every instruction that sets
117 an induction variable (either a biv or a giv). */
118
119 struct induction
120 {
121 rtx insn; /* The insn that sets a biv or giv */
122 rtx new_reg; /* New register, containing strength reduced
123 version of this giv. */
124 rtx src_reg; /* Biv from which this giv is computed.
125 (If this is a biv, then this is the biv.) */
126 enum g_types giv_type; /* Indicate whether DEST_ADDR or DEST_REG */
127 rtx dest_reg; /* Destination register for insn: this is the
128 register which was the biv or giv.
129 For a biv, this equals src_reg.
130 For a DEST_ADDR type giv, this is 0. */
131 rtx *location; /* Place in the insn where this giv occurs.
132 If GIV_TYPE is DEST_REG, this is 0. */
133 /* For a biv, this is the place where add_val
134 was found. */
135 enum machine_mode mode; /* The mode of this biv or giv */
136 rtx mem; /* For DEST_ADDR, the memory object. */
137 rtx mult_val; /* Multiplicative factor for src_reg. */
138 rtx add_val; /* Additive constant for that product. */
139 int benefit; /* Gain from eliminating this insn. */
140 rtx final_value; /* If the giv is used outside the loop, and its
141 final value could be calculated, it is put
142 here, and the giv is made replaceable. Set
143 the giv to this value before the loop. */
144 unsigned combined_with; /* The number of givs this giv has been
145 combined with. If nonzero, this giv
146 cannot combine with any other giv. */
147 unsigned replaceable : 1; /* 1 if we can substitute the strength-reduced
148 variable for the original variable.
149 0 means they must be kept separate and the
150 new one must be copied into the old pseudo
151 reg each time the old one is set. */
152 unsigned not_replaceable : 1; /* Used to prevent duplicating work. This is
153 1 if we know that the giv definitely can
154 not be made replaceable, in which case we
155 don't bother checking the variable again
156 even if further info is available.
157 Both this and the above can be zero. */
158 unsigned ignore : 1; /* 1 prohibits further processing of giv */
159 unsigned always_computable : 1;/* 1 if this value is computable every
160 iteration. */
161 unsigned always_executed : 1; /* 1 if this set occurs each iteration. */
162 unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv
163 update may be done multiple times per
164 iteration. */
165 unsigned cant_derive : 1; /* For giv's, 1 if this giv cannot derive
166 another giv. This occurs in many cases
167 where a giv's lifetime spans an update to
168 a biv. */
169 unsigned maybe_dead : 1; /* 1 if this giv might be dead. In that case,
170 we won't use it to eliminate a biv, it
171 would probably lose. */
172 unsigned auto_inc_opt : 1; /* 1 if this giv had its increment output next
173 to it to try to form an auto-inc address. */
174 unsigned shared : 1;
175 unsigned no_const_addval : 1; /* 1 if add_val does not contain a const. */
176 int lifetime; /* Length of life of this giv */
177 rtx derive_adjustment; /* If nonzero, is an adjustment to be
178 subtracted from add_val when this giv
179 derives another. This occurs when the
180 giv spans a biv update by incrementation. */
181	rtx ext_dependent;	/* If nonzero, is a sign or zero extension
182	of the biv on which this giv is dependent.  */
183 struct induction *next_iv; /* For givs, links together all givs that are
184 based on the same biv. For bivs, links
185 together all biv entries that refer to the
186 same biv register. */
187 struct induction *same; /* For givs, if the giv has been combined with
188 another giv, this points to the base giv.
189 The base giv will have COMBINED_WITH nonzero.
190	For bivs, if the biv has the same LOCATION
191	as another biv, this points to the base
192 biv. */
193 struct induction *same_insn; /* If there are multiple identical givs in
194 the same insn, then all but one have this
195 field set, and they all point to the giv
196 that doesn't have this field set. */
197 rtx last_use; /* For a giv made from a biv increment, this is
198 a substitute for the lifetime information. */
199 };
200
201
202 /* A `struct iv_class' is created for each biv. */
203
204 struct iv_class
205 {
206 unsigned int regno; /* Pseudo reg which is the biv. */
207 int biv_count; /* Number of insns setting this reg. */
208 struct induction *biv; /* List of all insns that set this reg. */
209 int giv_count; /* Number of DEST_REG givs computed from this
210 biv. The resulting count is only used in
211 check_dbra_loop. */
212 struct induction *giv; /* List of all insns that compute a giv
213 from this reg. */
214 int total_benefit; /* Sum of BENEFITs of all those givs. */
215 rtx initial_value; /* Value of reg at loop start. */
216 rtx initial_test; /* Test performed on BIV before loop. */
217 rtx final_value; /* Value of reg at loop end, if known. */
218 struct iv_class *next; /* Links all class structures together. */
219 rtx init_insn; /* insn which initializes biv, 0 if none. */
220 rtx init_set; /* SET of INIT_INSN, if any. */
221 unsigned incremented : 1; /* 1 if somewhere incremented/decremented */
222 unsigned eliminable : 1; /* 1 if plausible candidate for
223 elimination. */
224 unsigned nonneg : 1; /* 1 if we added a REG_NONNEG note for
225 this. */
226 unsigned reversed : 1; /* 1 if we reversed the loop that this
227 biv controls. */
228 unsigned all_reduced : 1; /* 1 if all givs using this biv have
229 been reduced. */
230 };
231
232
233 /* Definitions used by the basic induction variable discovery code. */
234 enum iv_mode
235 {
236 UNKNOWN_INDUCT,
237 BASIC_INDUCT,
238 NOT_BASIC_INDUCT,
239 GENERAL_INDUCT
240 };
241
242
243 /* A `struct iv' is created for every register. */
244
245 struct iv
246 {
247 enum iv_mode type;
248 union
249 {
250 struct iv_class *class;
251 struct induction *info;
252 } iv;
253 };
254
255
256 #define REG_IV_TYPE(ivs, n) ivs->regs[n].type
257 #define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
258 #define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class
259
260
261 struct loop_ivs
262 {
263 /* Indexed by register number, contains pointer to `struct
264 iv' if register is an induction variable. */
265 struct iv *regs;
266
267 /* Size of regs array. */
268 unsigned int n_regs;
269
270 /* The head of a list which links together (via the next field)
271 every iv class for the current loop. */
272 struct iv_class *list;
273 };
274
275
276 typedef struct loop_mem_info
277 {
278 rtx mem; /* The MEM itself. */
279 rtx reg; /* Corresponding pseudo, if any. */
280 int optimize; /* Nonzero if we can optimize access to this MEM. */
281 } loop_mem_info;
282
283
284
285 struct loop_reg
286 {
287 /* Number of times the reg is set during the loop being scanned.
288 During code motion, a negative value indicates a reg that has
289	been made a candidate; in particular -2 means that it is a
290	candidate that we know is equal to a constant and -1 means that
291 it is a candidate not known equal to a constant. After code
292 motion, regs moved have 0 (which is accurate now) while the
293 failed candidates have the original number of times set.
294
295 Therefore, at all times, == 0 indicates an invariant register;
296 < 0 a conditionally invariant one. */
297 int set_in_loop;
298
299 /* Original value of set_in_loop; same except that this value
300 is not set negative for a reg whose sets have been made candidates
301 and not set to 0 for a reg that is moved. */
302 int n_times_set;
303
304 /* Contains the insn in which a register was used if it was used
305 exactly once; contains const0_rtx if it was used more than once. */
306 rtx single_usage;
307
308 /* Nonzero indicates that the register cannot be moved or strength
309 reduced. */
310 char may_not_optimize;
311
312 /* Nonzero means reg N has already been moved out of one loop.
313 This reduces the desire to move it out of another. */
314 char moved_once;
315 };
316
317
318 struct loop_regs
319 {
320 int num; /* Number of regs used in table. */
321 int size; /* Size of table. */
322	struct loop_reg *array;	/* Register usage info array.  */
323 int multiple_uses; /* Nonzero if a reg has multiple uses. */
324 };
325
326
327
328 struct loop_movables
329 {
330 /* Head of movable chain. */
331 struct movable *head;
332 /* Last movable in chain. */
333 struct movable *last;
334 };
335
336
337 /* Information pertaining to a loop. */
338
339 struct loop_info
340 {
341 /* Nonzero if there is a subroutine call in the current loop. */
342 int has_call;
343 /* Nonzero if there is a libcall in the current loop. */
344 int has_libcall;
345	/* Nonzero if there is a non-constant call in the current loop.  */
346 int has_nonconst_call;
347 /* Nonzero if there is a prefetch instruction in the current loop. */
348 int has_prefetch;
349 /* Nonzero if there is a volatile memory reference in the current
350 loop. */
351 int has_volatile;
352 /* Nonzero if there is a tablejump in the current loop. */
353 int has_tablejump;
354 /* Nonzero if there are ways to leave the loop other than falling
355 off the end. */
356 int has_multiple_exit_targets;
357 /* Nonzero if there is an indirect jump in the current function. */
358 int has_indirect_jump;
359 /* Register or constant initial loop value. */
360 rtx initial_value;
361 /* Register or constant value used for comparison test. */
362 rtx comparison_value;
363 /* Register or constant approximate final value. */
364 rtx final_value;
365 /* Register or constant initial loop value with term common to
366 final_value removed. */
367 rtx initial_equiv_value;
368 /* Register or constant final loop value with term common to
369 initial_value removed. */
370 rtx final_equiv_value;
371 /* Register corresponding to iteration variable. */
372 rtx iteration_var;
373 /* Constant loop increment. */
374 rtx increment;
375 enum rtx_code comparison_code;
376 /* Holds the number of loop iterations. It is zero if the number
377 could not be calculated. Must be unsigned since the number of
378 iterations can be as high as 2^wordsize - 1. For loops with a
379 wider iterator, this number will be zero if the number of loop
380 iterations is too large for an unsigned integer to hold. */
381 unsigned HOST_WIDE_INT n_iterations;
382 int used_count_register;
383 /* The loop iterator induction variable. */
384 struct iv_class *iv;
385 /* List of MEMs that are stored in this loop. */
386 rtx store_mems;
387 /* Array of MEMs that are used (read or written) in this loop, but
388 cannot be aliased by anything in this loop, except perhaps
389 themselves. In other words, if mems[i] is altered during
390 the loop, it is altered by an expression that is rtx_equal_p to
391 it. */
392 loop_mem_info *mems;
393 /* The index of the next available slot in MEMS. */
394 int mems_idx;
395 /* The number of elements allocated in MEMS. */
396 int mems_allocated;
397 /* Nonzero if we don't know what MEMs were changed in the current
398 loop. This happens if the loop contains a call (in which case
399 `has_call' will also be set) or if we store into more than
400 NUM_STORES MEMs. */
401 int unknown_address_altered;
402 /* The above doesn't count any readonly memory locations that are
403 stored. This does. */
404 int unknown_constant_address_altered;
405 /* Count of memory write instructions discovered in the loop. */
406 int num_mem_sets;
407 /* The insn where the first of these was found. */
408 rtx first_loop_store_insn;
409	/* The chain of movable insns in the loop.  */
410	struct loop_movables movables;
411	/* The registers used in the loop.  */
412	struct loop_regs regs;
413	/* The induction variable information in the loop.  */
414 struct loop_ivs ivs;
415	/* Nonzero if a call is in the pre_header extended basic block.  */
416 int pre_header_has_call;
417 };
418
419 /* Not really meaningful values, but at least something. */
420 #ifndef SIMULTANEOUS_PREFETCHES
421 #define SIMULTANEOUS_PREFETCHES 3
422 #endif
423 #ifndef PREFETCH_BLOCK
424 #define PREFETCH_BLOCK 32
425 #endif
426 #ifndef HAVE_prefetch
427 #define HAVE_prefetch 0
428 #define CODE_FOR_prefetch 0
429 #define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
430 #endif
431
432 /* Give up the prefetch optimizations once we exceed a given threshold.
433 It is unlikely that we would be able to optimize something in a loop
434 with so many detected prefetches. */
435 #define MAX_PREFETCHES 100
436 /* The number of prefetch blocks that are beneficial to fetch at once before
437 a loop with a known (and low) iteration count. */
438 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
439 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
440 since it is likely that the data are already in the cache. */
441 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
442
443 /* Parameterize some prefetch heuristics so they can be turned on and off
444 easily for performance testing on new architectures. These can be
445 defined in target-dependent files. */
446
447 /* Prefetch is worthwhile only when loads/stores are dense. */
448 #ifndef PREFETCH_ONLY_DENSE_MEM
449 #define PREFETCH_ONLY_DENSE_MEM 1
450 #endif
451
452	/* Define what we mean by "dense" loads and stores; this value divided by 256
453	is the minimum fraction of memory references needed for prefetching to be worthwhile.  */
454 #ifndef PREFETCH_DENSE_MEM
455 #define PREFETCH_DENSE_MEM 220
456 #endif
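/* For example, the default value of 220 above corresponds to a density
   threshold of 220/256, i.e. roughly 86%.  */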
457
458 /* Do not prefetch for a loop whose iteration count is known to be low. */
459 #ifndef PREFETCH_NO_LOW_LOOPCNT
460 #define PREFETCH_NO_LOW_LOOPCNT 1
461 #endif
462
463 /* Define what we mean by a "low" iteration count. */
464 #ifndef PREFETCH_LOW_LOOPCNT
465 #define PREFETCH_LOW_LOOPCNT 32
466 #endif
467
468 /* Do not prefetch for a loop that contains a function call; such a loop is
469 probably not an internal loop. */
470 #ifndef PREFETCH_NO_CALL
471 #define PREFETCH_NO_CALL 1
472 #endif
473
474 /* Do not prefetch accesses with an extreme stride. */
475 #ifndef PREFETCH_NO_EXTREME_STRIDE
476 #define PREFETCH_NO_EXTREME_STRIDE 1
477 #endif
478
479 /* Define what we mean by an "extreme" stride. */
480 #ifndef PREFETCH_EXTREME_STRIDE
481 #define PREFETCH_EXTREME_STRIDE 4096
482 #endif
483
484 /* Define a limit to how far apart indices can be and still be merged
485 into a single prefetch. */
486 #ifndef PREFETCH_EXTREME_DIFFERENCE
487 #define PREFETCH_EXTREME_DIFFERENCE 4096
488 #endif
489
490 /* Issue prefetch instructions before the loop to fetch data to be used
491 in the first few loop iterations. */
492 #ifndef PREFETCH_BEFORE_LOOP
493 #define PREFETCH_BEFORE_LOOP 1
494 #endif
495
496 /* Do not handle reversed order prefetches (negative stride). */
497 #ifndef PREFETCH_NO_REVERSE_ORDER
498 #define PREFETCH_NO_REVERSE_ORDER 1
499 #endif
500
501 /* Prefetch even if the GIV is in conditional code. */
502 #ifndef PREFETCH_CONDITIONAL
503 #define PREFETCH_CONDITIONAL 1
504 #endif
505
506 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
507 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
508
509 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
510 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
511 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
512
513 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
514 ((REGNO) < FIRST_PSEUDO_REGISTER \
515 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
516
517
518 /* Vector mapping INSN_UIDs to luids.
519	The luids are like uids but always increase monotonically.
520 We use them to see whether a jump comes from outside a given loop. */
521
522 static int *uid_luid;
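/* For instance (an illustrative example, not real uids), if insns with uids
   40, 97 and 41 appear in that order in the insn stream, they are assigned
   luids 1, 2 and 3; comparing luids therefore compares positions in the
   stream even when the uids themselves are out of order.  */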
523
524 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
525 number the insn is contained in. */
526
527 static struct loop **uid_loop;
528
529 /* 1 + largest uid of any insn. */
530
531 static int max_uid_for_loop;
532
533 /* Number of loops detected in current function. Used as index to the
534 next few tables. */
535
536 static int max_loop_num;
537
538 /* Bound on pseudo register number before loop optimization.
539 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
540 static unsigned int max_reg_before_loop;
541
542 /* The value to pass to the next call of reg_scan_update. */
543 static int loop_max_reg;
544 \f
545 /* During the analysis of a loop, a chain of `struct movable's
546 is made to record all the movable insns found.
547 Then the entire chain can be scanned to decide which to move. */
548
549 struct movable
550 {
551 rtx insn; /* A movable insn */
552 rtx set_src; /* The expression this reg is set from. */
553 rtx set_dest; /* The destination of this SET. */
554	rtx dependencies;	/* When INSN is a libcall, this is an EXPR_LIST
555 of any registers used within the LIBCALL. */
556 int consec; /* Number of consecutive following insns
557 that must be moved with this one. */
558 unsigned int regno; /* The register it sets */
559 short lifetime; /* lifetime of that register;
560 may be adjusted when matching movables
561 that load the same value are found. */
562 short savings; /* Number of insns we can move for this reg,
563 including other movables that force this
564 or match this one. */
565 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
566 a low part that we should avoid changing when
567 clearing the rest of the reg. */
568 unsigned int cond : 1; /* 1 if only conditionally movable */
569 unsigned int force : 1; /* 1 means MUST move this insn */
570 unsigned int global : 1; /* 1 means reg is live outside this loop */
571 /* If PARTIAL is 1, GLOBAL means something different:
572 that the reg is live outside the range from where it is set
573 to the following label. */
574 unsigned int done : 1; /* 1 inhibits further processing of this */
575
576 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
577 In particular, moving it does not make it
578 invariant. */
579 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
580 load SRC, rather than copying INSN. */
581 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
582 first insn of a consecutive sets group. */
583 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
584 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
585 the original insn with a copy from that
586 pseudo, rather than deleting it. */
587 struct movable *match; /* First entry for same value */
588 struct movable *forces; /* An insn that must be moved if this is */
589 struct movable *next;
590 };
591
592
593 static FILE *loop_dump_stream;
594
595 /* Forward declarations. */
596
597 static void invalidate_loops_containing_label (rtx);
598 static void find_and_verify_loops (rtx, struct loops *);
599 static void mark_loop_jump (rtx, struct loop *);
600 static void prescan_loop (struct loop *);
601 static int reg_in_basic_block_p (rtx, rtx);
602 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
603 static int labels_in_range_p (rtx, int);
604 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
605 static void note_addr_stored (rtx, rtx, void *);
606 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
607 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
608 static rtx find_regs_nested (rtx, rtx);
609 static void scan_loop (struct loop*, int);
610 #if 0
611 static void replace_call_address (rtx, rtx, rtx);
612 #endif
613 static rtx skip_consec_insns (rtx, int);
614 static int libcall_benefit (rtx);
615 static rtx libcall_other_reg (rtx, rtx);
616 static void record_excess_regs (rtx, rtx, rtx *);
617 static void ignore_some_movables (struct loop_movables *);
618 static void force_movables (struct loop_movables *);
619 static void combine_movables (struct loop_movables *, struct loop_regs *);
620 static int num_unmoved_movables (const struct loop *);
621 static int regs_match_p (rtx, rtx, struct loop_movables *);
622 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
623 struct loop_regs *);
624 static void add_label_notes (rtx, rtx);
625 static void move_movables (struct loop *loop, struct loop_movables *, int,
626 int);
627 static void loop_movables_add (struct loop_movables *, struct movable *);
628 static void loop_movables_free (struct loop_movables *);
629 static int count_nonfixed_reads (const struct loop *, rtx);
630 static void loop_bivs_find (struct loop *);
631 static void loop_bivs_init_find (struct loop *);
632 static void loop_bivs_check (struct loop *);
633 static void loop_givs_find (struct loop *);
634 static void loop_givs_check (struct loop *);
635 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
636 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
637 struct induction *, rtx);
638 static void loop_givs_dead_check (struct loop *, struct iv_class *);
639 static void loop_givs_reduce (struct loop *, struct iv_class *);
640 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
641 static void loop_ivs_free (struct loop *);
642 static void strength_reduce (struct loop *, int);
643 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
644 static int valid_initial_value_p (rtx, rtx, int, rtx);
645 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
646 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
647 rtx, rtx *, int, int);
648 static void check_final_value (const struct loop *, struct induction *);
649 static void loop_ivs_dump (const struct loop *, FILE *, int);
650 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
651 static void loop_biv_dump (const struct induction *, FILE *, int);
652 static void loop_giv_dump (const struct induction *, FILE *, int);
653 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
654 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
655 rtx *);
656 static void update_giv_derive (const struct loop *, rtx);
657 static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
658 static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
659 HOST_WIDE_INT, enum machine_mode,
660 unsigned HOST_WIDE_INT);
661 static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
662 HOST_WIDE_INT, enum machine_mode, bool);
663 static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
664 HOST_WIDE_INT, rtx);
665 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
666 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
667 rtx, rtx, rtx *, rtx *, rtx **);
668 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
669 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
670 rtx *, rtx *, int, int *, enum machine_mode);
671 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
672 rtx *, rtx *, rtx *);
673 static int check_dbra_loop (struct loop *, int);
674 static rtx express_from_1 (rtx, rtx, rtx);
675 static rtx combine_givs_p (struct induction *, struct induction *);
676 static int cmp_combine_givs_stats (const void *, const void *);
677 static void combine_givs (struct loop_regs *, struct iv_class *);
678 static int product_cheap_p (rtx, rtx);
679 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
680 int, int);
681 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
682 struct iv_class *, int, basic_block, rtx);
683 static int last_use_this_basic_block (rtx, rtx);
684 static void record_initial (rtx, rtx, void *);
685 static void update_reg_last_use (rtx, rtx);
686 static rtx next_insn_in_loop (const struct loop *, rtx);
687 static void loop_regs_scan (const struct loop *, int);
688 static int count_insns_in_loop (const struct loop *);
689 static int find_mem_in_note_1 (rtx *, void *);
690 static rtx find_mem_in_note (rtx);
691 static void load_mems (const struct loop *);
692 static int insert_loop_mem (rtx *, void *);
693 static int replace_loop_mem (rtx *, void *);
694 static void replace_loop_mems (rtx, rtx, rtx, int);
695 static int replace_loop_reg (rtx *, void *);
696 static void replace_loop_regs (rtx insn, rtx, rtx);
697 static void note_reg_stored (rtx, rtx, void *);
698 static void try_copy_prop (const struct loop *, rtx, unsigned int);
699 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
700 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
701 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
702 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
703 static void loop_regs_update (const struct loop *, rtx);
704 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
705 static int loop_invariant_p (const struct loop *, rtx);
706 static rtx loop_insn_hoist (const struct loop *, rtx);
707 static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
708 rtx, basic_block, rtx);
709 static rtx loop_insn_emit_before (const struct loop *, basic_block,
710 rtx, rtx);
711 static int loop_insn_first_p (rtx, rtx);
712 static rtx get_condition_for_loop (const struct loop *, rtx);
713 static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
714 static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
715 static rtx extend_value_for_giv (struct induction *, rtx);
716 static rtx loop_insn_sink (const struct loop *, rtx);
717
718 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
719 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
720 rtx, rtx);
721 static rtx loop_call_insn_hoist (const struct loop *, rtx);
722 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
723
724 static void loop_dump_aux (const struct loop *, FILE *, int);
725 static void loop_delete_insns (rtx, rtx);
726 static HOST_WIDE_INT remove_constant_addition (rtx *);
727 static rtx gen_load_of_final_value (rtx, rtx);
728 void debug_ivs (const struct loop *);
729 void debug_iv_class (const struct iv_class *);
730 void debug_biv (const struct induction *);
731 void debug_giv (const struct induction *);
732 void debug_loop (const struct loop *);
733 void debug_loops (const struct loops *);
734
735 typedef struct loop_replace_args
736 {
737 rtx match;
738 rtx replacement;
739 rtx insn;
740 } loop_replace_args;
741
742 /* Nonzero iff INSN is between START and END, inclusive. */
743 #define INSN_IN_RANGE_P(INSN, START, END) \
744 (INSN_UID (INSN) < max_uid_for_loop \
745 && INSN_LUID (INSN) >= INSN_LUID (START) \
746 && INSN_LUID (INSN) <= INSN_LUID (END))
747
748 /* Indirect_jump_in_function is computed once per function. */
749 static int indirect_jump_in_function;
750 static int indirect_jump_in_function_p (rtx);
751
752 static int compute_luids (rtx, rtx, int);
753
754 static int biv_elimination_giv_has_0_offset (struct induction *,
755 struct induction *, rtx);
756 \f
757	/* Benefit penalty, if a giv is not replaceable, i.e. we must emit an insn to
758 copy the value of the strength reduced giv to its original register. */
759 static int copy_cost;
760
761 /* Cost of using a register, to normalize the benefits of a giv. */
762 static int reg_address_cost;
763
764 void
765 init_loop (void)
766 {
767 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
768
769 reg_address_cost = address_cost (reg, SImode);
770
771 copy_cost = COSTS_N_INSNS (1);
772 }
773 \f
774 /* Compute the mapping from uids to luids.
775 LUIDs are numbers assigned to insns, like uids,
776 except that luids increase monotonically through the code.
777 Start at insn START and stop just before END. Assign LUIDs
778 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
779 static int
780 compute_luids (rtx start, rtx end, int prev_luid)
781 {
782 int i;
783 rtx insn;
784
785 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
786 {
787 if (INSN_UID (insn) >= max_uid_for_loop)
788 continue;
789 /* Don't assign luids to line-number NOTEs, so that the distance in
790 luids between two insns is not affected by -g. */
791 if (!NOTE_P (insn)
792 || NOTE_LINE_NUMBER (insn) <= 0)
793 uid_luid[INSN_UID (insn)] = ++i;
794 else
795	/* Give a line number note the same luid as the preceding insn.  */
796 uid_luid[INSN_UID (insn)] = i;
797 }
798 return i + 1;
799 }
800 \f
801 /* Entry point of this file. Perform loop optimization
802 on the current function. F is the first insn of the function
803 and DUMPFILE is a stream for output of a trace of actions taken
804 (or 0 if none should be output). */
805
806 void
807 loop_optimize (rtx f, FILE *dumpfile, int flags)
808 {
809 rtx insn;
810 int i;
811 struct loops loops_data;
812 struct loops *loops = &loops_data;
813 struct loop_info *loops_info;
814
815 loop_dump_stream = dumpfile;
816
817 init_recog_no_volatile ();
818
819 max_reg_before_loop = max_reg_num ();
820 loop_max_reg = max_reg_before_loop;
821
822 regs_may_share = 0;
823
824 /* Count the number of loops. */
825
826 max_loop_num = 0;
827 for (insn = f; insn; insn = NEXT_INSN (insn))
828 {
829 if (NOTE_P (insn)
830 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
831 max_loop_num++;
832 }
833
834 /* Don't waste time if no loops. */
835 if (max_loop_num == 0)
836 return;
837
838 loops->num = max_loop_num;
839
840 /* Get size to use for tables indexed by uids.
841 Leave some space for labels allocated by find_and_verify_loops. */
842 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
843
844 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
845 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
846
847 /* Allocate storage for array of loops. */
848 loops->array = xcalloc (loops->num, sizeof (struct loop));
849
850 /* Find and process each loop.
851 First, find them, and record them in order of their beginnings. */
852 find_and_verify_loops (f, loops);
853
854 /* Allocate and initialize auxiliary loop information. */
855 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
856 for (i = 0; i < (int) loops->num; i++)
857 loops->array[i].aux = loops_info + i;
858
859 /* Now find all register lifetimes. This must be done after
860 find_and_verify_loops, because it might reorder the insns in the
861 function. */
862 reg_scan (f, max_reg_before_loop);
863
864 /* This must occur after reg_scan so that registers created by gcse
865 will have entries in the register tables.
866
867 We could have added a call to reg_scan after gcse_main in toplev.c,
868 but moving this call to init_alias_analysis is more efficient. */
869 init_alias_analysis ();
870
871	/* See if we went too far.  Note that get_max_uid already returns
872	one more than the maximum uid of all insns.  */
873 gcc_assert (get_max_uid () <= max_uid_for_loop);
874 /* Now reset it to the actual size we need. See above. */
875 max_uid_for_loop = get_max_uid ();
876
877 /* find_and_verify_loops has already called compute_luids, but it
878 might have rearranged code afterwards, so we need to recompute
879 the luids now. */
880 compute_luids (f, NULL_RTX, 0);
881
882 /* Don't leave gaps in uid_luid for insns that have been
883 deleted. It is possible that the first or last insn
884 using some register has been deleted by cross-jumping.
885 Make sure that uid_luid for that former insn's uid
886 points to the general area where that insn used to be. */
887 for (i = 0; i < max_uid_for_loop; i++)
888 {
889 uid_luid[0] = uid_luid[i];
890 if (uid_luid[0] != 0)
891 break;
892 }
893 for (i = 0; i < max_uid_for_loop; i++)
894 if (uid_luid[i] == 0)
895 uid_luid[i] = uid_luid[i - 1];
896
897	/* Determine if the function has an indirect jump.  On some systems
898 this prevents low overhead loop instructions from being used. */
899 indirect_jump_in_function = indirect_jump_in_function_p (f);
900
901 /* Now scan the loops, last ones first, since this means inner ones are done
902 before outer ones. */
903 for (i = max_loop_num - 1; i >= 0; i--)
904 {
905 struct loop *loop = &loops->array[i];
906
907 if (! loop->invalid && loop->end)
908 {
909 scan_loop (loop, flags);
910 ggc_collect ();
911 }
912 }
913
914 end_alias_analysis ();
915
916 /* Clean up. */
917 for (i = 0; i < (int) loops->num; i++)
918 free (loops_info[i].mems);
919
920 free (uid_luid);
921 free (uid_loop);
922 free (loops_info);
923 free (loops->array);
924 }
925 \f
926 /* Returns the next insn, in execution order, after INSN. START and
927 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
928 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
929 insn-stream; it is used with loops that are entered near the
930 bottom. */
931
932 static rtx
933 next_insn_in_loop (const struct loop *loop, rtx insn)
934 {
935 insn = NEXT_INSN (insn);
936
937 if (insn == loop->end)
938 {
939 if (loop->top)
940 /* Go to the top of the loop, and continue there. */
941 insn = loop->top;
942 else
943 /* We're done. */
944 insn = NULL_RTX;
945 }
946
947 if (insn == loop->scan_start)
948 /* We're done. */
949 insn = NULL_RTX;
950
951 return insn;
952 }
953
954 /* Find any register references hidden inside X and add them to
955	the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
956	expressions when checking whether a PARALLEL can be pulled out of a loop.  */
957
958 static rtx
959 find_regs_nested (rtx deps, rtx x)
960 {
961 enum rtx_code code = GET_CODE (x);
962 if (code == REG)
963 deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
964 else
965 {
966 const char *fmt = GET_RTX_FORMAT (code);
967 int i, j;
968 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
969 {
970 if (fmt[i] == 'e')
971 deps = find_regs_nested (deps, XEXP (x, i));
972 else if (fmt[i] == 'E')
973 for (j = 0; j < XVECLEN (x, i); j++)
974 deps = find_regs_nested (deps, XVECEXP (x, i, j));
975 }
976 }
977 return deps;
978 }
979
980 /* Optimize one loop described by LOOP. */
981
982 /* ??? Could also move memory writes out of loops if the destination address
983 is invariant, the source is invariant, the memory write is not volatile,
984 and if we can prove that no read inside the loop can read this address
985 before the write occurs. If there is a read of this address after the
986 write, then we can also mark the memory read as invariant. */
987
988 static void
989 scan_loop (struct loop *loop, int flags)
990 {
991 struct loop_info *loop_info = LOOP_INFO (loop);
992 struct loop_regs *regs = LOOP_REGS (loop);
993 int i;
994 rtx loop_start = loop->start;
995 rtx loop_end = loop->end;
996 rtx p;
997 /* 1 if we are scanning insns that could be executed zero times. */
998 int maybe_never = 0;
999 /* 1 if we are scanning insns that might never be executed
1000 due to a subroutine call which might exit before they are reached. */
1001 int call_passed = 0;
1002 /* Number of insns in the loop. */
1003 int insn_count;
1004 int tem;
1005 rtx temp, update_start, update_end;
1006 /* The SET from an insn, if it is the only SET in the insn. */
1007 rtx set, set1;
1008 /* Chain describing insns movable in current loop. */
1009 struct loop_movables *movables = LOOP_MOVABLES (loop);
1010 /* Ratio of extra register life span we can justify
1011 for saving an instruction. More if loop doesn't call subroutines
1012 since in that case saving an insn makes more difference
1013 and more registers are available. */
1014 int threshold;
1015 int in_libcall;
1016
1017 loop->top = 0;
1018
1019 movables->head = 0;
1020 movables->last = 0;
1021
1022 /* Determine whether this loop starts with a jump down to a test at
1023 the end. This will occur for a small number of loops with a test
1024 that is too complex to duplicate in front of the loop.
1025
1026 We search for the first insn or label in the loop, skipping NOTEs.
1027 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
1028 (because we might have a loop executed only once that contains a
1029 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
1030 (in case we have a degenerate loop).
1031
1032 Note that if we mistakenly think that a loop is entered at the top
1033 when, in fact, it is entered at the exit test, the only effect will be
1034 slightly poorer optimization. Making the opposite error can generate
1035 incorrect code. Since very few loops now start with a jump to the
1036 exit test, the code here to detect that case is very conservative. */
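  /* Schematically (an illustrative sketch only), such a bottom-entered loop
     looks like this in the insn stream:

	NOTE_INSN_LOOP_BEG
	  (jump L2)		;; unconditional jump to the exit test
	L1:			;; recorded below as LOOP->TOP
	  ... loop body ...
	L2:			;; the exit test; becomes LOOP->SCAN_START
	  (conditional jump L1)
	NOTE_INSN_LOOP_END  */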
1037
1038 for (p = NEXT_INSN (loop_start);
1039 p != loop_end
1040 && !LABEL_P (p) && ! INSN_P (p)
1041 && (!NOTE_P (p)
1042 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
1043 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
1044 p = NEXT_INSN (p))
1045 ;
1046
1047 loop->scan_start = p;
1048
1049 /* If loop end is the end of the current function, then emit a
1050 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
1051 note insn. This is the position we use when sinking insns out of
1052 the loop. */
1053 if (NEXT_INSN (loop->end) != 0)
1054 loop->sink = NEXT_INSN (loop->end);
1055 else
1056 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
1057
1058 /* Set up variables describing this loop. */
1059 prescan_loop (loop);
1060 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
1061
1062 /* If loop has a jump before the first label,
1063 the true entry is the target of that jump.
1064 Start scan from there.
1065 But record in LOOP->TOP the place where the end-test jumps
1066 back to so we can scan that after the end of the loop. */
1067 if (JUMP_P (p)
1068	/* Loop entry must be an unconditional jump (and not a RETURN).  */
1069 && any_uncondjump_p (p)
1070 && JUMP_LABEL (p) != 0
1071 /* Check to see whether the jump actually
1072 jumps out of the loop (meaning it's no loop).
1073 This case can happen for things like
1074 do {..} while (0). If this label was generated previously
1075 by loop, we can't tell anything about it and have to reject
1076 the loop. */
1077 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
1078 {
1079 loop->top = next_label (loop->scan_start);
1080 loop->scan_start = JUMP_LABEL (p);
1081 }
1082
1083 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
1084 as required by loop_reg_used_before_p. So skip such loops. (This
1085 test may never be true, but it's best to play it safe.)
1086
1087 Also, skip loops where we do not start scanning at a label. This
1088 test also rejects loops starting with a JUMP_INSN that failed the
1089 test above. */
1090
1091 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
1092 || !LABEL_P (loop->scan_start))
1093 {
1094 if (loop_dump_stream)
1095 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
1096 INSN_UID (loop_start), INSN_UID (loop_end));
1097 return;
1098 }
1099
1100 /* Allocate extra space for REGs that might be created by load_mems.
1101 We allocate a little extra slop as well, in the hopes that we
1102 won't have to reallocate the regs array. */
1103 loop_regs_scan (loop, loop_info->mems_idx + 16);
1104 insn_count = count_insns_in_loop (loop);
1105
1106 if (loop_dump_stream)
1107 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
1108 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
1109
1110 /* Scan through the loop finding insns that are safe to move.
1111 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
1112 this reg will be considered invariant for subsequent insns.
1113 We consider whether subsequent insns use the reg
1114 in deciding whether it is worth actually moving.
1115
1116 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
1117 and therefore it is possible that the insns we are scanning
1118 would never be executed. At such times, we must make sure
1119 that it is safe to execute the insn once instead of zero times.
1120 When MAYBE_NEVER is 0, all insns will be executed at least once
1121 so that is not a problem. */
1122
1123 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
1124 p != NULL_RTX;
1125 p = next_insn_in_loop (loop, p))
1126 {
1127 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
1128 in_libcall--;
1129 if (NONJUMP_INSN_P (p))
1130 {
1131 /* Do not scan past an optimization barrier. */
1132 if (GET_CODE (PATTERN (p)) == ASM_INPUT)
1133 break;
1134 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
1135 if (temp)
1136 in_libcall++;
1137 if (! in_libcall
1138 && (set = single_set (p))
1139 && REG_P (SET_DEST (set))
1140 && SET_DEST (set) != frame_pointer_rtx
1141 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1142 && SET_DEST (set) != pic_offset_table_rtx
1143 #endif
1144 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
1145 {
1146 int tem1 = 0;
1147 int tem2 = 0;
1148 int move_insn = 0;
1149 int insert_temp = 0;
1150 rtx src = SET_SRC (set);
1151 rtx dependencies = 0;
1152
1153 /* Figure out what to use as a source of this insn. If a
1154 REG_EQUIV note is given or if a REG_EQUAL note with a
1155 constant operand is specified, use it as the source and
1156 mark that we should move this insn by calling
1157	emit_move_insn rather than duplicating the insn.
1158
1159 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
1160 note is present. */
1161 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1162 if (temp)
1163 src = XEXP (temp, 0), move_insn = 1;
1164 else
1165 {
1166 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1167 if (temp && CONSTANT_P (XEXP (temp, 0)))
1168 src = XEXP (temp, 0), move_insn = 1;
1169 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
1170 {
1171 src = XEXP (temp, 0);
1172 /* A libcall block can use regs that don't appear in
1173 the equivalent expression. To move the libcall,
1174 we must move those regs too. */
1175 dependencies = libcall_other_reg (p, src);
1176 }
1177 }
1178
1179 /* For parallels, add any possible uses to the dependencies, as
1180 we can't move the insn without resolving them first.
1181 MEMs inside CLOBBERs may also reference registers; these
1182 count as implicit uses. */
1183 if (GET_CODE (PATTERN (p)) == PARALLEL)
1184 {
1185 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
1186 {
1187 rtx x = XVECEXP (PATTERN (p), 0, i);
1188 if (GET_CODE (x) == USE)
1189 dependencies
1190 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
1191 dependencies);
1192 else if (GET_CODE (x) == CLOBBER
1193 && MEM_P (XEXP (x, 0)))
1194 dependencies = find_regs_nested (dependencies,
1195 XEXP (XEXP (x, 0), 0));
1196 }
1197 }
1198
1199 if (/* The register is used in basic blocks other
1200 than the one where it is set (meaning that
1201 something after this point in the loop might
1202 depend on its value before the set). */
1203 ! reg_in_basic_block_p (p, SET_DEST (set))
1204 /* And the set is not guaranteed to be executed once
1205 the loop starts, or the value before the set is
1206 needed before the set occurs...
1207
1208 ??? Note we have quadratic behavior here, mitigated
1209 by the fact that the previous test will often fail for
1210 large loops. Rather than re-scanning the entire loop
1211 each time for register usage, we should build tables
1212 of the register usage and use them here instead. */
1213 && (maybe_never
1214 || loop_reg_used_before_p (loop, set, p)))
1215 /* It is unsafe to move the set. However, it may be OK to
1216 move the source into a new pseudo, and substitute a
1217 reg-to-reg copy for the original insn.
1218
1219 This code used to consider it OK to move a set of a variable
1220 which was not created by the user and not used in an exit
1221 test.
1222 That behavior is incorrect and was removed. */
1223 insert_temp = 1;
1224
1225 /* Don't try to optimize a MODE_CC set with a constant
1226 source. It probably will be combined with a conditional
1227 jump. */
1228 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
1229 && CONSTANT_P (src))
1230 ;
1231 /* Don't try to optimize a register that was made
1232 by loop-optimization for an inner loop.
1233 We don't know its life-span, so we can't compute
1234 the benefit. */
1235 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
1236 ;
1237 /* Don't move the source and add a reg-to-reg copy:
1238 - with -Os (this certainly increases size),
1239 - if the mode doesn't support copy operations (obviously),
1240 - if the source is already a reg (the motion will gain nothing),
1241 - if the source is a legitimate constant (likewise). */
1242 else if (insert_temp
1243 && (optimize_size
1244 || ! can_copy_p (GET_MODE (SET_SRC (set)))
1245 || REG_P (SET_SRC (set))
1246 || (CONSTANT_P (SET_SRC (set))
1247 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
1248 ;
1249 else if ((tem = loop_invariant_p (loop, src))
1250 && (dependencies == 0
1251 || (tem2
1252 = loop_invariant_p (loop, dependencies)) != 0)
1253 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
1254 || (tem1
1255 = consec_sets_invariant_p
1256 (loop, SET_DEST (set),
1257 regs->array[REGNO (SET_DEST (set))].set_in_loop,
1258 p)))
1259 /* If the insn can cause a trap (such as divide by zero),
1260 can't move it unless it's guaranteed to be executed
1261 once loop is entered. Even a function call might
1262 prevent the trap insn from being reached
1263 (since it might exit!) */
1264 && ! ((maybe_never || call_passed)
1265 && may_trap_p (src)))
1266 {
1267 struct movable *m;
1268 int regno = REGNO (SET_DEST (set));
1269 rtx user, user_set;
1270
1271	/* A potential lossage is the case where two
1272 insns can be combined as long as they are both in the
1273 loop, but we move one of them outside the loop. For
1274 large loops, this can lose. The most common case of
1275 this is the address of a function being called.
1276
1277 Therefore, if this register is marked as being used
1278 exactly once if we are in a loop with calls
1279 (a "large loop"), see if we can replace the usage of
1280 this register with the source of this SET. If we can,
1281 delete this insn.
1282
1283 Don't do this if:
1284 (1) P has a REG_RETVAL note or
1285 (2) if we have SMALL_REGISTER_CLASSES and
1286 (a) SET_SRC is a hard register or
1287 (b) the destination of the user is a hard register. */
1288
1289 if (loop_info->has_call
1290 && regno >= FIRST_PSEUDO_REGISTER
1291 && (user = regs->array[regno].single_usage) != NULL
1292 && user != const0_rtx
1293 && REGNO_FIRST_UID (regno) == INSN_UID (p)
1294 && REGNO_LAST_UID (regno) == INSN_UID (user)
1295 && regs->array[regno].set_in_loop == 1
1296 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
1297 && ! side_effects_p (SET_SRC (set))
1298 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
1299 && (!SMALL_REGISTER_CLASSES
1300 || !REG_P (SET_SRC (set))
1301 || !HARD_REGISTER_P (SET_SRC (set)))
1302 && (!SMALL_REGISTER_CLASSES
1303 || !NONJUMP_INSN_P (user)
1304 || !(user_set = single_set (user))
1305 || !REG_P (SET_DEST (user_set))
1306 || !HARD_REGISTER_P (SET_DEST (user_set)))
1307 /* This test is not redundant; SET_SRC (set) might be
1308 a call-clobbered register and the life of REGNO
1309 might span a call. */
1310 && ! modified_between_p (SET_SRC (set), p, user)
1311 && no_labels_between_p (p, user)
1312 && validate_replace_rtx (SET_DEST (set),
1313 SET_SRC (set), user))
1314 {
1315 /* Replace any usage in a REG_EQUAL note. Must copy
1316 the new source, so that we don't get rtx sharing
1317 between the SET_SOURCE and REG_NOTES of insn p. */
1318 REG_NOTES (user)
1319 = replace_rtx (REG_NOTES (user), SET_DEST (set),
1320 copy_rtx (SET_SRC (set)));
1321
1322 delete_insn (p);
1323 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1324 i++)
1325 regs->array[regno+i].set_in_loop = 0;
1326 continue;
1327 }
1328
1329 m = xmalloc (sizeof (struct movable));
1330 m->next = 0;
1331 m->insn = p;
1332 m->set_src = src;
1333 m->dependencies = dependencies;
1334 m->set_dest = SET_DEST (set);
1335 m->force = 0;
1336 m->consec
1337 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
1338 m->done = 0;
1339 m->forces = 0;
1340 m->partial = 0;
1341 m->move_insn = move_insn;
1342 m->move_insn_first = 0;
1343 m->insert_temp = insert_temp;
1344 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1345 m->savemode = VOIDmode;
1346 m->regno = regno;
1347 /* Set M->cond if either loop_invariant_p
1348 or consec_sets_invariant_p returned 2
1349 (only conditionally invariant). */
1350 m->cond = ((tem | tem1 | tem2) > 1);
1351 m->global = LOOP_REG_GLOBAL_P (loop, regno);
1352 m->match = 0;
1353 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1354 m->savings = regs->array[regno].n_times_set;
1355 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
1356 m->savings += libcall_benefit (p);
1357 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1358 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
1359 /* Add M to the end of the chain MOVABLES. */
1360 loop_movables_add (movables, m);
1361
1362 if (m->consec > 0)
1363 {
1364 /* It is possible for the first instruction to have a
1365 REG_EQUAL note but a non-invariant SET_SRC, so we must
1366 remember the status of the first instruction in case
1367 the last instruction doesn't have a REG_EQUAL note. */
1368 m->move_insn_first = m->move_insn;
1369
1370 /* Skip this insn, not checking REG_LIBCALL notes. */
1371 p = next_nonnote_insn (p);
1372 /* Skip the consecutive insns, if there are any. */
1373 p = skip_consec_insns (p, m->consec);
1374 /* Back up to the last insn of the consecutive group. */
1375 p = prev_nonnote_insn (p);
1376
1377 /* We must now reset m->move_insn, m->is_equiv, and
1378 possibly m->set_src to correspond to the effects of
1379 all the insns. */
1380 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1381 if (temp)
1382 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1383 else
1384 {
1385 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1386 if (temp && CONSTANT_P (XEXP (temp, 0)))
1387 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1388 else
1389 m->move_insn = 0;
1390
1391 }
1392 m->is_equiv
1393 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1394 }
1395 }
1396 /* If this register is always set within a STRICT_LOW_PART
1397 or set to zero, then its high bytes are constant.
1398 So clear them outside the loop and within the loop
1399 just load the low bytes.
1400 We must check that the machine has an instruction to do so.
1401 Also, if the value loaded into the register
1402 depends on the same register, this cannot be done. */
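	 /* An illustrative RTL shape for this case (a sketch only; register
	    numbers and modes invented):
		(set (reg:SI 100) (const_int 0))
		(set (strict_low_part (subreg:QI (reg:SI 100) 0)) (mem:QI ...))
	    The clearing of the full register can then be done once before
	    the loop, leaving only the narrow load inside it.  */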
1403 else if (SET_SRC (set) == const0_rtx
1404 && NONJUMP_INSN_P (NEXT_INSN (p))
1405 && (set1 = single_set (NEXT_INSN (p)))
1406 && GET_CODE (set1) == SET
1407 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1408 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1409 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1410 == SET_DEST (set))
1411 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1412 {
1413 int regno = REGNO (SET_DEST (set));
1414 if (regs->array[regno].set_in_loop == 2)
1415 {
1416 struct movable *m;
1417 m = xmalloc (sizeof (struct movable));
1418 m->next = 0;
1419 m->insn = p;
1420 m->set_dest = SET_DEST (set);
1421 m->dependencies = 0;
1422 m->force = 0;
1423 m->consec = 0;
1424 m->done = 0;
1425 m->forces = 0;
1426 m->move_insn = 0;
1427 m->move_insn_first = 0;
1428 m->insert_temp = insert_temp;
1429 m->partial = 1;
1430 /* If the insn may not be executed on some cycles,
1431 we can't clear the whole reg; clear just high part.
1432 Not even if the reg is used only within this loop.
1433 Consider this:
1434 while (1)
1435 while (s != t) {
1436 if (foo ()) x = *s;
1437 use (x);
1438 }
1439 Clearing x before the inner loop could clobber a value
1440 being saved from the last time around the outer loop.
1441 However, if the reg is not used outside this loop
1442 and all uses of the register are in the same
1443 basic block as the store, there is no problem.
1444
1445 If this insn was made by loop, we don't know its
1446 INSN_LUID and hence must make a conservative
1447 assumption. */
1448 m->global = (INSN_UID (p) >= max_uid_for_loop
1449 || LOOP_REG_GLOBAL_P (loop, regno)
1450 || (labels_in_range_p
1451 (p, REGNO_FIRST_LUID (regno))));
1452 if (maybe_never && m->global)
1453 m->savemode = GET_MODE (SET_SRC (set1));
1454 else
1455 m->savemode = VOIDmode;
1456 m->regno = regno;
1457 m->cond = 0;
1458 m->match = 0;
1459 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1460 m->savings = 1;
1461 for (i = 0;
1462 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1463 i++)
1464 regs->array[regno+i].set_in_loop = -1;
1465 /* Add M to the end of the chain MOVABLES. */
1466 loop_movables_add (movables, m);
1467 }
1468 }
1469 }
1470 }
1471 /* Past a call insn, we get to insns which might not be executed
1472 because the call might exit. This matters for insns that trap.
1473 Constant and pure call insns always return, so they don't count. */
1474 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1475 call_passed = 1;
1476 /* Past a label or a jump, we get to insns for which we
1477 can't count on whether or how many times they will be
1478 executed during each iteration. Therefore, we can
1479 only move out sets of trivial variables
1480 (those not used after the loop). */
1481 /* Similar code appears twice in strength_reduce. */
1482 else if ((LABEL_P (p) || JUMP_P (p))
1483 /* If we enter the loop in the middle, and scan around to the
1484 beginning, don't set maybe_never for that. This must be an
1485 unconditional jump, otherwise the code at the top of the
1486 loop might never be executed. Unconditional jumps are
1487 followed by a barrier then the loop_end. */
1488 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1489 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1490 && any_uncondjump_p (p)))
1491 maybe_never = 1;
1492 }
1493
1494 /* If one movable subsumes another, ignore that other. */
1495
1496 ignore_some_movables (movables);
1497
1498 /* For each movable insn, see if the reg that it loads
1499 is used, at the point where it dies, by another conditionally movable insn.
1500 If so, record that the second insn "forces" the first one,
1501 since the second can be moved only if the first is. */
1502
1503 force_movables (movables);
1504
1505 /* See if there are multiple movable insns that load the same value.
1506 If there are, make all but the first point at the first one
1507 through the `match' field, and add the priorities of them
1508 all together as the priority of the first. */
1509
1510 combine_movables (movables, regs);
1511
1512 /* Now consider each movable insn to decide whether it is worth moving.
1513 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1514
1515 For machines with few registers this increases code size, so do not
1516 move movables when optimizing for code size on such machines.
1517 (The 18 below is the value for i386.) */
1518
1519 if (!optimize_size
1520 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1521 {
1522 move_movables (loop, movables, threshold, insn_count);
1523
1524 /* Recalculate regs->array if move_movables has created new
1525 registers. */
1526 if (max_reg_num () > regs->num)
1527 {
1528 loop_regs_scan (loop, 0);
1529 for (update_start = loop_start;
1530 PREV_INSN (update_start)
1531 && !LABEL_P (PREV_INSN (update_start));
1532 update_start = PREV_INSN (update_start))
1533 ;
1534 update_end = NEXT_INSN (loop_end);
1535
1536 reg_scan_update (update_start, update_end, loop_max_reg);
1537 loop_max_reg = max_reg_num ();
1538 }
1539 }
1540
1541 /* Now, candidates that are still negative are those that were not moved.
1542 Change regs->array[I].set_in_loop to indicate that those are not actually
1543 invariant. */
1544 for (i = 0; i < regs->num; i++)
1545 if (regs->array[i].set_in_loop < 0)
1546 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1547
1548 /* Now that we've moved some things out of the loop, we might be able to
1549 hoist even more memory references. */
1550 load_mems (loop);
1551
1552 /* Recalculate regs->array if load_mems has created new registers. */
1553 if (max_reg_num () > regs->num)
1554 loop_regs_scan (loop, 0);
1555
1556 for (update_start = loop_start;
1557 PREV_INSN (update_start)
1558 && !LABEL_P (PREV_INSN (update_start));
1559 update_start = PREV_INSN (update_start))
1560 ;
1561 update_end = NEXT_INSN (loop_end);
1562
1563 reg_scan_update (update_start, update_end, loop_max_reg);
1564 loop_max_reg = max_reg_num ();
1565
1566 if (flag_strength_reduce)
1567 {
1568 if (update_end && LABEL_P (update_end))
1569 /* Ensure our label doesn't go away. */
1570 LABEL_NUSES (update_end)++;
1571
1572 strength_reduce (loop, flags);
1573
1574 reg_scan_update (update_start, update_end, loop_max_reg);
1575 loop_max_reg = max_reg_num ();
1576
1577 if (update_end && LABEL_P (update_end)
1578 && --LABEL_NUSES (update_end) == 0)
1579 delete_related_insns (update_end);
1580 }
1581
1582
1583 /* The movable information was needed by strength reduction above; free it now. */
1584 loop_movables_free (movables);
1585
1586 free (regs->array);
1587 regs->array = 0;
1588 regs->num = 0;
1589 }
1590 \f
1591 /* Add elements to *OUTPUT to record all the pseudo-regs
1592 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1593
1594 static void
1595 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1596 {
1597 enum rtx_code code;
1598 const char *fmt;
1599 int i;
1600
1601 code = GET_CODE (in_this);
1602
1603 switch (code)
1604 {
1605 case PC:
1606 case CC0:
1607 case CONST_INT:
1608 case CONST_DOUBLE:
1609 case CONST:
1610 case SYMBOL_REF:
1611 case LABEL_REF:
1612 return;
1613
1614 case REG:
1615 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1616 && ! reg_mentioned_p (in_this, not_in_this))
1617 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1618 return;
1619
1620 default:
1621 break;
1622 }
1623
1624 fmt = GET_RTX_FORMAT (code);
1625 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1626 {
1627 int j;
1628
1629 switch (fmt[i])
1630 {
1631 case 'E':
1632 for (j = 0; j < XVECLEN (in_this, i); j++)
1633 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1634 break;
1635
1636 case 'e':
1637 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1638 break;
1639 }
1640 }
1641 }
1642 \f
1643 /* Check what regs are referred to in the libcall block ending with INSN,
1644 aside from those mentioned in the equivalent value.
1645 If there are none, return 0.
1646 If there are one or more, return an EXPR_LIST containing all of them. */
1647
1648 static rtx
1649 libcall_other_reg (rtx insn, rtx equiv)
1650 {
1651 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1652 rtx p = XEXP (note, 0);
1653 rtx output = 0;
1654
1655 /* First, find all the regs used in the libcall block
1656 that are not mentioned as inputs to the result. */
1657
1658 while (p != insn)
1659 {
1660 if (INSN_P (p))
1661 record_excess_regs (PATTERN (p), equiv, &output);
1662 p = NEXT_INSN (p);
1663 }
1664
1665 return output;
1666 }
1667 \f
1668 /* Return 1 if all uses of REG
1669 are between INSN and the end of the basic block. */
1670
1671 static int
1672 reg_in_basic_block_p (rtx insn, rtx reg)
1673 {
1674 int regno = REGNO (reg);
1675 rtx p;
1676
1677 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1678 return 0;
1679
1680 /* Search this basic block for the already recorded last use of the reg. */
1681 for (p = insn; p; p = NEXT_INSN (p))
1682 {
1683 switch (GET_CODE (p))
1684 {
1685 case NOTE:
1686 break;
1687
1688 case INSN:
1689 case CALL_INSN:
1690 /* Ordinary insn: if this is the last use, we win. */
1691 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1692 return 1;
1693 break;
1694
1695 case JUMP_INSN:
1696 /* Jump insn: if this is the last use, we win. */
1697 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1698 return 1;
1699 /* Otherwise, it's the end of the basic block, so we lose. */
1700 return 0;
1701
1702 case CODE_LABEL:
1703 case BARRIER:
1704 /* It's the end of the basic block, so we lose. */
1705 return 0;
1706
1707 default:
1708 break;
1709 }
1710 }
1711
1712 /* The "last use" that was recorded can't be found after the first
1713 use. This can happen when the last use was deleted while
1714 processing an inner loop, this inner loop was then completely
1715 unrolled, and the outer loop is always exited after the inner loop,
1716 so that everything after the first use becomes a single basic block. */
1717 return 1;
1718 }
1719 \f
1720 /* Compute the benefit of eliminating the insns in the block whose
1721 last insn is LAST. This may be a group of insns used to compute a
1722 value directly, or it may contain a library call. */
1723
1724 static int
1725 libcall_benefit (rtx last)
1726 {
1727 rtx insn;
1728 int benefit = 0;
1729
1730 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1731 insn != last; insn = NEXT_INSN (insn))
1732 {
1733 if (CALL_P (insn))
1734 benefit += 10; /* Assume at least this many insns in a library
1735 routine. */
1736 else if (NONJUMP_INSN_P (insn)
1737 && GET_CODE (PATTERN (insn)) != USE
1738 && GET_CODE (PATTERN (insn)) != CLOBBER)
1739 benefit++;
1740 }
1741
1742 return benefit;
1743 }
1744 \f
1745 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
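/* A libcall block is bracketed by a REG_LIBCALL note on its first insn
   (pointing at the last insn of the block) and a REG_RETVAL note on its
   last insn (pointing back at the first); following the REG_LIBCALL note
   therefore skips the whole sequence in one step.  */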
1746
1747 static rtx
1748 skip_consec_insns (rtx insn, int count)
1749 {
1750 for (; count > 0; count--)
1751 {
1752 rtx temp;
1753
1754 /* If first insn of libcall sequence, skip to end. */
1755 /* Do this at start of loop, since INSN is guaranteed to
1756 be an insn here. */
1757 if (!NOTE_P (insn)
1758 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1759 insn = XEXP (temp, 0);
1760
1761 do
1762 insn = NEXT_INSN (insn);
1763 while (NOTE_P (insn));
1764 }
1765
1766 return insn;
1767 }
1768
1769 /* Ignore any movable whose insn falls within a libcall
1770 which is part of another movable.
1771 We make use of the fact that the movable for the libcall value
1772 was made later and so appears later on the chain. */
1773
1774 static void
1775 ignore_some_movables (struct loop_movables *movables)
1776 {
1777 struct movable *m, *m1;
1778
1779 for (m = movables->head; m; m = m->next)
1780 {
1781 /* Is this a movable for the value of a libcall? */
1782 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1783 if (note)
1784 {
1785 rtx insn;
1786 /* Check for earlier movables inside that range,
1787 and mark them invalid. We cannot use LUIDs here because
1788 insns created by loop.c for prior loops don't have LUIDs.
1789 Rather than reject all such insns from movables, we just
1790 explicitly check each insn in the libcall (since invariant
1791 libcalls aren't that common). */
1792 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1793 for (m1 = movables->head; m1 != m; m1 = m1->next)
1794 if (m1->insn == insn)
1795 m1->done = 1;
1796 }
1797 }
1798 }
1799
1800 /* For each movable insn, see if the reg that it loads
1801 leads when it dies right into another conditionally movable insn.
1802 If so, record that the second insn "forces" the first one,
1803 since the second can be moved only if the first is. */
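/* For example, if movable M1 loads an invariant into register R and a
   later, conditionally movable insn M uses R at the point where R dies,
   then M->forces is set to M1: M may be hoisted only once M1 has been,
   and M1 inherits M's lifetime and savings, making it more attractive
   to hoist.  */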
1804
1805 static void
1806 force_movables (struct loop_movables *movables)
1807 {
1808 struct movable *m, *m1;
1809
1810 for (m1 = movables->head; m1; m1 = m1->next)
1811 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1812 if (!m1->partial && !m1->done)
1813 {
1814 int regno = m1->regno;
1815 for (m = m1->next; m; m = m->next)
1816 /* ??? Could this be a bug? What if CSE caused the
1817 register of M1 to be used after this insn?
1818 Since CSE does not update regno_last_uid,
1819 this insn M->insn might not be where it dies.
1820 But very likely this doesn't matter; what matters is
1821 that M's reg is computed from M1's reg. */
1822 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1823 && !m->done)
1824 break;
1825 if (m != 0 && m->set_src == m1->set_dest
1826 /* If m->consec, m->set_src isn't valid. */
1827 && m->consec == 0)
1828 m = 0;
1829
1830 /* Increase the priority of moving the first insn
1831 since it permits the second to be moved as well.
1832 Likewise for insns already forced by the first insn. */
1833 if (m != 0)
1834 {
1835 struct movable *m2;
1836
1837 m->forces = m1;
1838 for (m2 = m1; m2; m2 = m2->forces)
1839 {
1840 m2->lifetime += m->lifetime;
1841 m2->savings += m->savings;
1842 }
1843 }
1844 }
1845 }
1846 \f
1847 /* Find invariant expressions that are equal and can be combined into
1848 one register. */
1849
1850 static void
1851 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1852 {
1853 struct movable *m;
1854 char *matched_regs = xmalloc (regs->num);
1855 enum machine_mode mode;
1856
1857 /* Regs that are set more than once are not allowed to match
1858 or be matched. I'm no longer sure why not. */
1859 /* Only pseudo registers are allowed to match or be matched,
1860 since move_movables does not validate the change. */
1861 /* Perhaps testing m->consec_sets would be more appropriate here? */
1862
1863 for (m = movables->head; m; m = m->next)
1864 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1865 && m->regno >= FIRST_PSEUDO_REGISTER
1866 && !m->insert_temp
1867 && !m->partial)
1868 {
1869 struct movable *m1;
1870 int regno = m->regno;
1871
1872 memset (matched_regs, 0, regs->num);
1873 matched_regs[regno] = 1;
1874
1875 /* We want later insns to match the first one. Don't make the first
1876 one match any later ones. So start this loop at m->next. */
1877 for (m1 = m->next; m1; m1 = m1->next)
1878 if (m != m1 && m1->match == 0
1879 && !m1->insert_temp
1880 && regs->array[m1->regno].n_times_set == 1
1881 && m1->regno >= FIRST_PSEUDO_REGISTER
1882 /* A reg used outside the loop mustn't be eliminated. */
1883 && !m1->global
1884 /* A reg used for zero-extending mustn't be eliminated. */
1885 && !m1->partial
1886 && (matched_regs[m1->regno]
1887 ||
1888 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1889 /* See if the source of M1 says it matches M. */
1890 && ((REG_P (m1->set_src)
1891 && matched_regs[REGNO (m1->set_src)])
1892 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1893 movables, regs))))
1894 && ((m->dependencies == m1->dependencies)
1895 || rtx_equal_p (m->dependencies, m1->dependencies)))
1896 {
1897 m->lifetime += m1->lifetime;
1898 m->savings += m1->savings;
1899 m1->done = 1;
1900 m1->match = m;
1901 matched_regs[m1->regno] = 1;
1902 }
1903 }
1904
1905 /* Now combine the regs used for zero-extension.
1906 This can be done for those not marked `global'
1907 provided their lives don't overlap. */
1908
1909 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1910 mode = GET_MODE_WIDER_MODE (mode))
1911 {
1912 struct movable *m0 = 0;
1913
1914 /* Combine all the registers for extension from mode MODE.
1915 Don't combine any that are used outside this loop. */
1916 for (m = movables->head; m; m = m->next)
1917 if (m->partial && ! m->global
1918 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1919 {
1920 struct movable *m1;
1921
1922 int first = REGNO_FIRST_LUID (m->regno);
1923 int last = REGNO_LAST_LUID (m->regno);
1924
1925 if (m0 == 0)
1926 {
1927 /* First one: don't check for overlap, just record it. */
1928 m0 = m;
1929 continue;
1930 }
1931
1932 /* Make sure they extend to the same mode.
1933 (Almost always true.) */
1934 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1935 continue;
1936
1937 /* We already have one: check for overlap with those
1938 already combined together. */
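/* Two lifetimes [REGNO_FIRST_LUID, REGNO_LAST_LUID] overlap
   unless one of them ends before the other begins.  */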
1939 for (m1 = movables->head; m1 != m; m1 = m1->next)
1940 if (m1 == m0 || (m1->partial && m1->match == m0))
1941 if (! (REGNO_FIRST_LUID (m1->regno) > last
1942 || REGNO_LAST_LUID (m1->regno) < first))
1943 goto overlap;
1944
1945 /* No overlap: we can combine this with the others. */
1946 m0->lifetime += m->lifetime;
1947 m0->savings += m->savings;
1948 m->done = 1;
1949 m->match = m0;
1950
1951 overlap:
1952 ;
1953 }
1954 }
1955
1956 /* Clean up. */
1957 free (matched_regs);
1958 }
1959
1960 /* Returns the number of movable instructions in LOOP that were not
1961 moved outside the loop. */
1962
1963 static int
1964 num_unmoved_movables (const struct loop *loop)
1965 {
1966 int num = 0;
1967 struct movable *m;
1968
1969 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1970 if (!m->done)
1971 ++num;
1972
1973 return num;
1974 }
1975
1976 \f
1977 /* Return 1 if regs X and Y will become the same if moved. */
1978
1979 static int
1980 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1981 {
1982 unsigned int xn = REGNO (x);
1983 unsigned int yn = REGNO (y);
1984 struct movable *mx, *my;
1985
1986 for (mx = movables->head; mx; mx = mx->next)
1987 if (mx->regno == xn)
1988 break;
1989
1990 for (my = movables->head; my; my = my->next)
1991 if (my->regno == yn)
1992 break;
1993
1994 return (mx && my
1995 && ((mx->match == my->match && mx->match != 0)
1996 || mx->match == my
1997 || mx == my->match));
1998 }
1999
2000 /* Return 1 if X and Y are identical-looking rtx's.
2001 This is the Lisp function EQUAL for rtx arguments.
2002
2003 If two registers are matching movables or a movable register and an
2004 equivalent constant, consider them equal. */
2005
2006 static int
2007 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2008 struct loop_regs *regs)
2009 {
2010 int i;
2011 int j;
2012 struct movable *m;
2013 enum rtx_code code;
2014 const char *fmt;
2015
2016 if (x == y)
2017 return 1;
2018 if (x == 0 || y == 0)
2019 return 0;
2020
2021 code = GET_CODE (x);
2022
2023 /* If we have a register and a constant, they may sometimes be
2024 equal. */
2025 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2026 && CONSTANT_P (y))
2027 {
2028 for (m = movables->head; m; m = m->next)
2029 if (m->move_insn && m->regno == REGNO (x)
2030 && rtx_equal_p (m->set_src, y))
2031 return 1;
2032 }
2033 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2034 && CONSTANT_P (x))
2035 {
2036 for (m = movables->head; m; m = m->next)
2037 if (m->move_insn && m->regno == REGNO (y)
2038 && rtx_equal_p (m->set_src, x))
2039 return 1;
2040 }
2041
2042 /* Otherwise, rtx's of different codes cannot be equal. */
2043 if (code != GET_CODE (y))
2044 return 0;
2045
2046 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2047 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2048
2049 if (GET_MODE (x) != GET_MODE (y))
2050 return 0;
2051
2052 /* These types of rtx's can be compared nonrecursively. */
2053 switch (code)
2054 {
2055 case PC:
2056 case CC0:
2057 case CONST_INT:
2058 case CONST_DOUBLE:
2059 return 0;
2060
2061 case REG:
2062 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2063
2064 case LABEL_REF:
2065 return XEXP (x, 0) == XEXP (y, 0);
2066 case SYMBOL_REF:
2067 return XSTR (x, 0) == XSTR (y, 0);
2068
2069 default:
2070 break;
2071 }
2072
2073 /* Compare the elements. If any pair of corresponding elements
2074 fail to match, return 0 for the whole thing. */
2075
2076 fmt = GET_RTX_FORMAT (code);
2077 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2078 {
2079 switch (fmt[i])
2080 {
2081 case 'w':
2082 if (XWINT (x, i) != XWINT (y, i))
2083 return 0;
2084 break;
2085
2086 case 'i':
2087 if (XINT (x, i) != XINT (y, i))
2088 return 0;
2089 break;
2090
2091 case 'E':
2092 /* Two vectors must have the same length. */
2093 if (XVECLEN (x, i) != XVECLEN (y, i))
2094 return 0;
2095
2096 /* And the corresponding elements must match. */
2097 for (j = 0; j < XVECLEN (x, i); j++)
2098 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2099 movables, regs) == 0)
2100 return 0;
2101 break;
2102
2103 case 'e':
2104 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2105 == 0)
2106 return 0;
2107 break;
2108
2109 case 's':
2110 if (strcmp (XSTR (x, i), XSTR (y, i)))
2111 return 0;
2112 break;
2113
2114 case 'u':
2115 /* These are just backpointers, so they don't matter. */
2116 break;
2117
2118 case '0':
2119 break;
2120
2121 /* It is believed that rtx's at this level will never
2122 contain anything but integers and other rtx's,
2123 except for within LABEL_REFs and SYMBOL_REFs. */
2124 default:
2125 gcc_unreachable ();
2126 }
2127 }
2128 return 1;
2129 }
2130 \f
2131 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2132 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2133 references is incremented once for each added note. */
2134
2135 static void
2136 add_label_notes (rtx x, rtx insns)
2137 {
2138 enum rtx_code code = GET_CODE (x);
2139 int i, j;
2140 const char *fmt;
2141 rtx insn;
2142
2143 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2144 {
2145 /* This code used to ignore labels that referred to dispatch tables to
2146 avoid flow generating (slightly) worse code.
2147
2148 We no longer ignore such label references (see LABEL_REF handling in
2149 mark_jump_label for additional information). */
2150 for (insn = insns; insn; insn = NEXT_INSN (insn))
2151 if (reg_mentioned_p (XEXP (x, 0), insn))
2152 {
2153 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2154 REG_NOTES (insn));
2155 if (LABEL_P (XEXP (x, 0)))
2156 LABEL_NUSES (XEXP (x, 0))++;
2157 }
2158 }
2159
2160 fmt = GET_RTX_FORMAT (code);
2161 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2162 {
2163 if (fmt[i] == 'e')
2164 add_label_notes (XEXP (x, i), insns);
2165 else if (fmt[i] == 'E')
2166 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2167 add_label_notes (XVECEXP (x, i, j), insns);
2168 }
2169 }
2170 \f
2171 /* Scan MOVABLES, and move the insns that deserve to be moved.
2172 If two matching movables are combined, replace one reg with the
2173 other throughout. */
2174
2175 static void
2176 move_movables (struct loop *loop, struct loop_movables *movables,
2177 int threshold, int insn_count)
2178 {
2179 struct loop_regs *regs = LOOP_REGS (loop);
2180 int nregs = regs->num;
2181 rtx new_start = 0;
2182 struct movable *m;
2183 rtx p;
2184 rtx loop_start = loop->start;
2185 rtx loop_end = loop->end;
2186 /* Map of pseudo-register replacements to handle combining
2187 when we move several insns that load the same value
2188 into different pseudo-registers. */
2189 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2190 char *already_moved = xcalloc (nregs, sizeof (char));
2191
2192 for (m = movables->head; m; m = m->next)
2193 {
2194 /* Describe this movable insn. */
2195
2196 if (loop_dump_stream)
2197 {
2198 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2199 INSN_UID (m->insn), m->regno, m->lifetime);
2200 if (m->consec > 0)
2201 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2202 if (m->cond)
2203 fprintf (loop_dump_stream, "cond ");
2204 if (m->force)
2205 fprintf (loop_dump_stream, "force ");
2206 if (m->global)
2207 fprintf (loop_dump_stream, "global ");
2208 if (m->done)
2209 fprintf (loop_dump_stream, "done ");
2210 if (m->move_insn)
2211 fprintf (loop_dump_stream, "move-insn ");
2212 if (m->match)
2213 fprintf (loop_dump_stream, "matches %d ",
2214 INSN_UID (m->match->insn));
2215 if (m->forces)
2216 fprintf (loop_dump_stream, "forces %d ",
2217 INSN_UID (m->forces->insn));
2218 }
2219
2220 /* Ignore the insn if it's already done (it matched something else).
2221 Otherwise, see if it is now safe to move. */
2222
2223 if (!m->done
2224 && (! m->cond
2225 || (1 == loop_invariant_p (loop, m->set_src)
2226 && (m->dependencies == 0
2227 || 1 == loop_invariant_p (loop, m->dependencies))
2228 && (m->consec == 0
2229 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2230 m->consec + 1,
2231 m->insn))))
2232 && (! m->forces || m->forces->done))
2233 {
2234 int regno;
2235 rtx p;
2236 int savings = m->savings;
2237
2238 /* We have an insn that is safe to move.
2239 Compute its desirability. */
2240
2241 p = m->insn;
2242 regno = m->regno;
2243
2244 if (loop_dump_stream)
2245 fprintf (loop_dump_stream, "savings %d ", savings);
2246
2247 if (regs->array[regno].moved_once && loop_dump_stream)
2248 fprintf (loop_dump_stream, "halved since already moved ");
2249
2250 /* An insn MUST be moved if we already moved something else
2251 which is safe only if this one is moved too: that is,
2252 if already_moved[REGNO] is nonzero. */
2253
2254 /* An insn is desirable to move if the new lifetime of the
2255 register is no more than THRESHOLD times the old lifetime.
2256 If it's not desirable, it means the loop is so big
2257 that moving won't speed things up much,
2258 and it is liable to make register usage worse. */
2259
2260 /* It is also desirable to move if it can be moved at no
2261 extra cost because something else was already moved. */
2262
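/* In other words, the product of THRESHOLD, the estimated savings and
   the register's lifetime must reach the loop's insn count (doubled if
   the register was already hoisted out of another loop, which halves
   the expected benefit).  */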
2263 if (already_moved[regno]
2264 || (threshold * savings * m->lifetime) >=
2265 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2266 || (m->forces && m->forces->done
2267 && regs->array[m->forces->regno].n_times_set == 1))
2268 {
2269 int count;
2270 struct movable *m1;
2271 rtx first = NULL_RTX;
2272 rtx newreg = NULL_RTX;
2273
2274 if (m->insert_temp)
2275 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2276
2277 /* Now move the insns that set the reg. */
2278
2279 if (m->partial && m->match)
2280 {
2281 rtx newpat, i1;
2282 rtx r1, r2;
2283 /* Find the end of this chain of matching regs.
2284 Thus, we load each reg in the chain from that one reg.
2285 And that reg is loaded with 0 directly,
2286 since it has ->match == 0. */
2287 for (m1 = m; m1->match; m1 = m1->match);
2288 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2289 SET_DEST (PATTERN (m1->insn)));
2290 i1 = loop_insn_hoist (loop, newpat);
2291
2292 /* Mark the moved, invariant reg as being allowed to
2293 share a hard reg with the other matching invariant. */
2294 REG_NOTES (i1) = REG_NOTES (m->insn);
2295 r1 = SET_DEST (PATTERN (m->insn));
2296 r2 = SET_DEST (PATTERN (m1->insn));
2297 regs_may_share
2298 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2299 gen_rtx_EXPR_LIST (VOIDmode, r2,
2300 regs_may_share));
2301 delete_insn (m->insn);
2302
2303 if (new_start == 0)
2304 new_start = i1;
2305
2306 if (loop_dump_stream)
2307 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2308 }
2309 /* If we are to re-generate the item being moved with a
2310 new move insn, first delete what we have and then emit
2311 the move insn before the loop. */
2312 else if (m->move_insn)
2313 {
2314 rtx i1, temp, seq;
2315
2316 for (count = m->consec; count >= 0; count--)
2317 {
2318 if (!NOTE_P (p))
2319 {
2320 /* If this is the first insn of a library
2321 call sequence, something is very
2322 wrong. */
2323 gcc_assert (!find_reg_note
2324 (p, REG_LIBCALL, NULL_RTX));
2325
2326 /* If this is the last insn of a libcall
2327 sequence, then delete every insn in the
2328 sequence except the last. The last insn
2329 is handled in the normal manner. */
2330 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2331
2332 if (temp)
2333 {
2334 temp = XEXP (temp, 0);
2335 while (temp != p)
2336 temp = delete_insn (temp);
2337 }
2338 }
2339
2340 temp = p;
2341 p = delete_insn (p);
2342
2343 /* simplify_giv_expr expects that it can walk the insns
2344 at m->insn forwards and see this old sequence we are
2345 tossing here. delete_insn does preserve the next
2346 pointers, but when we skip over a NOTE we must fix
2347 it up. Otherwise that code walks into the non-deleted
2348 insn stream. */
2349 while (p && NOTE_P (p))
2350 p = NEXT_INSN (temp) = NEXT_INSN (p);
2351
2352 if (m->insert_temp)
2353 {
2354 /* Replace the original insn with a move from
2355 our newly created temp. */
2356 start_sequence ();
2357 emit_move_insn (m->set_dest, newreg);
2358 seq = get_insns ();
2359 end_sequence ();
2360 emit_insn_before (seq, p);
2361 }
2362 }
2363
2364 start_sequence ();
2365 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2366 m->set_src);
2367 seq = get_insns ();
2368 end_sequence ();
2369
2370 add_label_notes (m->set_src, seq);
2371
2372 i1 = loop_insn_hoist (loop, seq);
2373 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2374 set_unique_reg_note (i1,
2375 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2376 m->set_src);
2377
2378 if (loop_dump_stream)
2379 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2380
2381 /* The more regs we move, the less we like moving them. */
2382 threshold -= 3;
2383 }
2384 else
2385 {
2386 for (count = m->consec; count >= 0; count--)
2387 {
2388 rtx i1, temp;
2389
2390 /* If first insn of libcall sequence, skip to end. */
2391 /* Do this at start of loop, since p is guaranteed to
2392 be an insn here. */
2393 if (!NOTE_P (p)
2394 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2395 p = XEXP (temp, 0);
2396
2397 /* If last insn of libcall sequence, move all
2398 insns except the last before the loop. The last
2399 insn is handled in the normal manner. */
2400 if (!NOTE_P (p)
2401 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2402 {
2403 rtx fn_address = 0;
2404 rtx fn_reg = 0;
2405 rtx fn_address_insn = 0;
2406
2407 first = 0;
2408 for (temp = XEXP (temp, 0); temp != p;
2409 temp = NEXT_INSN (temp))
2410 {
2411 rtx body;
2412 rtx n;
2413 rtx next;
2414
2415 if (NOTE_P (temp))
2416 continue;
2417
2418 body = PATTERN (temp);
2419
2420 /* Find the next insn after TEMP,
2421 not counting USE or NOTE insns. */
2422 for (next = NEXT_INSN (temp); next != p;
2423 next = NEXT_INSN (next))
2424 if (! (NONJUMP_INSN_P (next)
2425 && GET_CODE (PATTERN (next)) == USE)
2426 && !NOTE_P (next))
2427 break;
2428
2429 /* If that is the call, this may be the insn
2430 that loads the function address.
2431
2432 Extract the function address from the insn
2433 that loads it into a register.
2434 If this insn was cse'd, we get incorrect code.
2435
2436 So emit a new move insn that copies the
2437 function address into the register that the
2438 call insn will use. flow.c will delete any
2439 redundant stores that we have created. */
2440 if (CALL_P (next)
2441 && GET_CODE (body) == SET
2442 && REG_P (SET_DEST (body))
2443 && (n = find_reg_note (temp, REG_EQUAL,
2444 NULL_RTX)))
2445 {
2446 fn_reg = SET_SRC (body);
2447 if (!REG_P (fn_reg))
2448 fn_reg = SET_DEST (body);
2449 fn_address = XEXP (n, 0);
2450 fn_address_insn = temp;
2451 }
2452 /* We have the call insn.
2453 If it uses the register we suspect it might,
2454 load it with the correct address directly. */
2455 if (CALL_P (temp)
2456 && fn_address != 0
2457 && reg_referenced_p (fn_reg, body))
2458 loop_insn_emit_after (loop, 0, fn_address_insn,
2459 gen_move_insn
2460 (fn_reg, fn_address));
2461
2462 if (CALL_P (temp))
2463 {
2464 i1 = loop_call_insn_hoist (loop, body);
2465 /* Because the USAGE information potentially
2466 contains objects other than hard registers
2467 we need to copy it. */
2468 if (CALL_INSN_FUNCTION_USAGE (temp))
2469 CALL_INSN_FUNCTION_USAGE (i1)
2470 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2471 }
2472 else
2473 i1 = loop_insn_hoist (loop, body);
2474 if (first == 0)
2475 first = i1;
2476 if (temp == fn_address_insn)
2477 fn_address_insn = i1;
2478 REG_NOTES (i1) = REG_NOTES (temp);
2479 REG_NOTES (temp) = NULL;
2480 delete_insn (temp);
2481 }
2482 if (new_start == 0)
2483 new_start = first;
2484 }
2485 if (m->savemode != VOIDmode)
2486 {
2487 /* P sets REG to zero; but we should clear only
2488 the bits that are not covered by the mode
2489 m->savemode. */
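/* E.g. if m->savemode is QImode on a 32-bit target, the sequence
   built below amounts to REG &= 0xff: the low byte, which may hold
   a live value, is kept while the high bytes are zeroed.  */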
2490 rtx reg = m->set_dest;
2491 rtx sequence;
2492 rtx tem;
2493
2494 start_sequence ();
2495 tem = expand_simple_binop
2496 (GET_MODE (reg), AND, reg,
2497 GEN_INT ((((HOST_WIDE_INT) 1
2498 << GET_MODE_BITSIZE (m->savemode)))
2499 - 1),
2500 reg, 1, OPTAB_LIB_WIDEN);
2501 gcc_assert (tem);
2502 if (tem != reg)
2503 emit_move_insn (reg, tem);
2504 sequence = get_insns ();
2505 end_sequence ();
2506 i1 = loop_insn_hoist (loop, sequence);
2507 }
2508 else if (CALL_P (p))
2509 {
2510 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2511 /* Because the USAGE information potentially
2512 contains objects other than hard registers
2513 we need to copy it. */
2514 if (CALL_INSN_FUNCTION_USAGE (p))
2515 CALL_INSN_FUNCTION_USAGE (i1)
2516 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2517 }
2518 else if (count == m->consec && m->move_insn_first)
2519 {
2520 rtx seq;
2521 /* The SET_SRC might not be invariant, so we must
2522 use the REG_EQUAL note. */
2523 start_sequence ();
2524 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2525 m->set_src);
2526 seq = get_insns ();
2527 end_sequence ();
2528
2529 add_label_notes (m->set_src, seq);
2530
2531 i1 = loop_insn_hoist (loop, seq);
2532 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2533 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2534 : REG_EQUAL, m->set_src);
2535 }
2536 else if (m->insert_temp)
2537 {
2538 rtx *reg_map2 = xcalloc (REGNO (newreg),
2539 sizeof (rtx));
2540 reg_map2[m->regno] = newreg;
2541
2542 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2543 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2544 free (reg_map2);
2545 }
2546 else
2547 i1 = loop_insn_hoist (loop, PATTERN (p));
2548
2549 if (REG_NOTES (i1) == 0)
2550 {
2551 REG_NOTES (i1) = REG_NOTES (p);
2552 REG_NOTES (p) = NULL;
2553
2554 /* If there is a REG_EQUAL note present whose value
2555 is not loop invariant, then delete it, since it
2556 may cause problems with later optimization passes.
2557 It is possible for cse to create such notes
2558 like this as a result of record_jump_cond. */
2559
2560 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2561 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2562 remove_note (i1, temp);
2563 }
2564
2565 if (new_start == 0)
2566 new_start = i1;
2567
2568 if (loop_dump_stream)
2569 fprintf (loop_dump_stream, " moved to %d",
2570 INSN_UID (i1));
2571
2572 /* If library call, now fix the REG_NOTES that contain
2573 insn pointers, namely REG_LIBCALL on FIRST
2574 and REG_RETVAL on I1. */
2575 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2576 {
2577 XEXP (temp, 0) = first;
2578 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2579 XEXP (temp, 0) = i1;
2580 }
2581
2582 temp = p;
2583 delete_insn (p);
2584 p = NEXT_INSN (p);
2585
2586 /* simplify_giv_expr expects that it can walk the insns
2587 at m->insn forwards and see this old sequence we are
2588 tossing here. delete_insn does preserve the next
2589 pointers, but when we skip over a NOTE we must fix
2590 it up. Otherwise that code walks into the non-deleted
2591 insn stream. */
2592 while (p && NOTE_P (p))
2593 p = NEXT_INSN (temp) = NEXT_INSN (p);
2594
2595 if (m->insert_temp)
2596 {
2597 rtx seq;
2598 /* Replace the original insn with a move from
2599 our newly created temp. */
2600 start_sequence ();
2601 emit_move_insn (m->set_dest, newreg);
2602 seq = get_insns ();
2603 end_sequence ();
2604 emit_insn_before (seq, p);
2605 }
2606 }
2607
2608 /* The more regs we move, the less we like moving them. */
2609 threshold -= 3;
2610 }
2611
2612 m->done = 1;
2613
2614 if (!m->insert_temp)
2615 {
2616 /* Any other movable that loads the same register
2617 MUST be moved. */
2618 already_moved[regno] = 1;
2619
2620 /* This reg has been moved out of one loop. */
2621 regs->array[regno].moved_once = 1;
2622
2623 /* The reg set here is now invariant. */
2624 if (! m->partial)
2625 {
2626 int i;
2627 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2628 regs->array[regno+i].set_in_loop = 0;
2629 }
2630
2631 /* Change the length-of-life info for the register
2632 to say it lives at least the full length of this loop.
2633 This will help guide optimizations in outer loops. */
2634
2635 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2636 /* This is the old insn before all the moved insns.
2637 We can't use the moved insn because it is out of range
2638 in uid_luid. Only the old insns have luids. */
2639 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2640 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2641 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2642 }
2643
2644 /* Combine with this moved insn any other matching movables. */
2645
2646 if (! m->partial)
2647 for (m1 = movables->head; m1; m1 = m1->next)
2648 if (m1->match == m)
2649 {
2650 rtx temp;
2651
2652 reg_map[m1->regno] = m->set_dest;
2653
2654 /* Get rid of the matching insn
2655 and prevent further processing of it. */
2656 m1->done = 1;
2657
2658 /* If library call, delete all insns. */
2659 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2660 NULL_RTX)))
2661 delete_insn_chain (XEXP (temp, 0), m1->insn);
2662 else
2663 delete_insn (m1->insn);
2664
2665 /* Any other movable that loads the same register
2666 MUST be moved. */
2667 already_moved[m1->regno] = 1;
2668
2669 /* The reg merged here is now invariant,
2670 if the reg it matches is invariant. */
2671 if (! m->partial)
2672 {
2673 int i;
2674 for (i = 0;
2675 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2676 i++)
2677 regs->array[m1->regno+i].set_in_loop = 0;
2678 }
2679 }
2680 }
2681 else if (loop_dump_stream)
2682 fprintf (loop_dump_stream, "not desirable");
2683 }
2684 else if (loop_dump_stream && !m->match)
2685 fprintf (loop_dump_stream, "not safe");
2686
2687 if (loop_dump_stream)
2688 fprintf (loop_dump_stream, "\n");
2689 }
2690
2691 if (new_start == 0)
2692 new_start = loop_start;
2693
2694 /* Go through all the instructions in the loop, making
2695 all the register substitutions scheduled in REG_MAP. */
2696 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2697 if (INSN_P (p))
2698 {
2699 replace_regs (PATTERN (p), reg_map, nregs, 0);
2700 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2701 INSN_CODE (p) = -1;
2702 }
2703
2704 /* Clean up. */
2705 free (reg_map);
2706 free (already_moved);
2707 }
2708
2709
2710 static void
2711 loop_movables_add (struct loop_movables *movables, struct movable *m)
2712 {
2713 if (movables->head == 0)
2714 movables->head = m;
2715 else
2716 movables->last->next = m;
2717 movables->last = m;
2718 }
2719
2720
2721 static void
2722 loop_movables_free (struct loop_movables *movables)
2723 {
2724 struct movable *m;
2725 struct movable *m_next;
2726
2727 for (m = movables->head; m; m = m_next)
2728 {
2729 m_next = m->next;
2730 free (m);
2731 }
2732 }
2733 \f
2734 #if 0
2735 /* Scan X and replace the address of any MEM in it with ADDR.
2736 REG is the address that MEM should have before the replacement. */
2737
2738 static void
2739 replace_call_address (rtx x, rtx reg, rtx addr)
2740 {
2741 enum rtx_code code;
2742 int i;
2743 const char *fmt;
2744
2745 if (x == 0)
2746 return;
2747 code = GET_CODE (x);
2748 switch (code)
2749 {
2750 case PC:
2751 case CC0:
2752 case CONST_INT:
2753 case CONST_DOUBLE:
2754 case CONST:
2755 case SYMBOL_REF:
2756 case LABEL_REF:
2757 case REG:
2758 return;
2759
2760 case SET:
2761 /* Short cut for very common case. */
2762 replace_call_address (XEXP (x, 1), reg, addr);
2763 return;
2764
2765 case CALL:
2766 /* Short cut for very common case. */
2767 replace_call_address (XEXP (x, 0), reg, addr);
2768 return;
2769
2770 case MEM:
2771 /* If this MEM uses a reg other than the one we expected,
2772 something is wrong. */
2773 gcc_assert (XEXP (x, 0) == reg);
2774 XEXP (x, 0) = addr;
2775 return;
2776
2777 default:
2778 break;
2779 }
2780
2781 fmt = GET_RTX_FORMAT (code);
2782 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2783 {
2784 if (fmt[i] == 'e')
2785 replace_call_address (XEXP (x, i), reg, addr);
2786 else if (fmt[i] == 'E')
2787 {
2788 int j;
2789 for (j = 0; j < XVECLEN (x, i); j++)
2790 replace_call_address (XVECEXP (x, i, j), reg, addr);
2791 }
2792 }
2793 }
2794 #endif
2795 \f
2796 /* Return the number of memory refs to addresses that vary
2797 in the rtx X. */
2798
2799 static int
2800 count_nonfixed_reads (const struct loop *loop, rtx x)
2801 {
2802 enum rtx_code code;
2803 int i;
2804 const char *fmt;
2805 int value;
2806
2807 if (x == 0)
2808 return 0;
2809
2810 code = GET_CODE (x);
2811 switch (code)
2812 {
2813 case PC:
2814 case CC0:
2815 case CONST_INT:
2816 case CONST_DOUBLE:
2817 case CONST:
2818 case SYMBOL_REF:
2819 case LABEL_REF:
2820 case REG:
2821 return 0;
2822
2823 case MEM:
2824 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2825 + count_nonfixed_reads (loop, XEXP (x, 0)));
2826
2827 default:
2828 break;
2829 }
2830
2831 value = 0;
2832 fmt = GET_RTX_FORMAT (code);
2833 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2834 {
2835 if (fmt[i] == 'e')
2836 value += count_nonfixed_reads (loop, XEXP (x, i));
2837 if (fmt[i] == 'E')
2838 {
2839 int j;
2840 for (j = 0; j < XVECLEN (x, i); j++)
2841 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2842 }
2843 }
2844 return value;
2845 }
2846 \f
2847 /* Scan a loop setting the elements `loops_enclosed',
2848 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2849 `unknown_address_altered', `unknown_constant_address_altered', and
2850 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2851 list `store_mems' in LOOP. */
2852
2853 static void
2854 prescan_loop (struct loop *loop)
2855 {
2856 int level = 1;
2857 rtx insn;
2858 struct loop_info *loop_info = LOOP_INFO (loop);
2859 rtx start = loop->start;
2860 rtx end = loop->end;
2861 /* The label after END. Jumping here is just like falling off the
2862 end of the loop. We use next_nonnote_insn instead of next_label
2863 as a hedge against the (pathological) case where some actual insn
2864 might end up between the two. */
2865 rtx exit_target = next_nonnote_insn (end);
2866
2867 loop_info->has_indirect_jump = indirect_jump_in_function;
2868 loop_info->pre_header_has_call = 0;
2869 loop_info->has_call = 0;
2870 loop_info->has_nonconst_call = 0;
2871 loop_info->has_prefetch = 0;
2872 loop_info->has_volatile = 0;
2873 loop_info->has_tablejump = 0;
2874 loop_info->has_multiple_exit_targets = 0;
2875 loop->level = 1;
2876
2877 loop_info->unknown_address_altered = 0;
2878 loop_info->unknown_constant_address_altered = 0;
2879 loop_info->store_mems = NULL_RTX;
2880 loop_info->first_loop_store_insn = NULL_RTX;
2881 loop_info->mems_idx = 0;
2882 loop_info->num_mem_sets = 0;
2883
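/* Scan backwards from the loop start to the previous label to see
   whether the pre-header contains a call.  */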
2884 for (insn = start; insn && !LABEL_P (insn);
2885 insn = PREV_INSN (insn))
2886 {
2887 if (CALL_P (insn))
2888 {
2889 loop_info->pre_header_has_call = 1;
2890 break;
2891 }
2892 }
2893
2894 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2895 insn = NEXT_INSN (insn))
2896 {
2897 switch (GET_CODE (insn))
2898 {
2899 case NOTE:
2900 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2901 {
2902 ++level;
2903 /* Count number of loops contained in this one. */
2904 loop->level++;
2905 }
2906 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2907 --level;
2908 break;
2909
2910 case CALL_INSN:
2911 if (! CONST_OR_PURE_CALL_P (insn))
2912 {
2913 loop_info->unknown_address_altered = 1;
2914 loop_info->has_nonconst_call = 1;
2915 }
2916 else if (pure_call_p (insn))
2917 loop_info->has_nonconst_call = 1;
2918 loop_info->has_call = 1;
2919 if (can_throw_internal (insn))
2920 loop_info->has_multiple_exit_targets = 1;
2921 break;
2922
2923 case JUMP_INSN:
2924 if (! loop_info->has_multiple_exit_targets)
2925 {
2926 rtx set = pc_set (insn);
2927
2928 if (set)
2929 {
2930 rtx src = SET_SRC (set);
2931 rtx label1, label2;
2932
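/* A conditional branch has the form
     (set (pc) (if_then_else COND (label_ref L) (pc)))
   where either arm may be the fall-through (pc); a simple jump just
   has (label_ref L) as its source.  Examine each label arm in turn.  */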
2933 if (GET_CODE (src) == IF_THEN_ELSE)
2934 {
2935 label1 = XEXP (src, 1);
2936 label2 = XEXP (src, 2);
2937 }
2938 else
2939 {
2940 label1 = src;
2941 label2 = NULL_RTX;
2942 }
2943
2944 do
2945 {
2946 if (label1 && label1 != pc_rtx)
2947 {
2948 if (GET_CODE (label1) != LABEL_REF)
2949 {
2950 /* Something tricky. */
2951 loop_info->has_multiple_exit_targets = 1;
2952 break;
2953 }
2954 else if (XEXP (label1, 0) != exit_target
2955 && LABEL_OUTSIDE_LOOP_P (label1))
2956 {
2957 /* A jump outside the current loop. */
2958 loop_info->has_multiple_exit_targets = 1;
2959 break;
2960 }
2961 }
2962
2963 label1 = label2;
2964 label2 = NULL_RTX;
2965 }
2966 while (label1);
2967 }
2968 else
2969 {
2970 /* A return, or something tricky. */
2971 loop_info->has_multiple_exit_targets = 1;
2972 }
2973 }
2974 /* Fall through. */
2975
2976 case INSN:
2977 if (volatile_refs_p (PATTERN (insn)))
2978 loop_info->has_volatile = 1;
2979
2980 if (JUMP_P (insn)
2981 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2982 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2983 loop_info->has_tablejump = 1;
2984
2985 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2986 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2987 loop_info->first_loop_store_insn = insn;
2988
2989 if (flag_non_call_exceptions && can_throw_internal (insn))
2990 loop_info->has_multiple_exit_targets = 1;
2991 break;
2992
2993 default:
2994 break;
2995 }
2996 }
2997
2998 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2999 if (/* An exception thrown by a called function might land us
3000 anywhere. */
3001 ! loop_info->has_nonconst_call
3002 /* We don't want loads for MEMs moved to a location before the
3003 one at which their stack memory becomes allocated. (Note
3004 that this is not a problem for malloc, etc., since those
3005 require actual function calls.) */
3006 && ! current_function_calls_alloca
3007 /* There are ways to leave the loop other than falling off the
3008 end. */
3009 && ! loop_info->has_multiple_exit_targets)
3010 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3011 insn = NEXT_INSN (insn))
3012 for_each_rtx (&insn, insert_loop_mem, loop_info);
3013
3014 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3015 that loop_invariant_p and load_mems can use true_dependence
3016 to determine what is really clobbered. */
3017 if (loop_info->unknown_address_altered)
3018 {
3019 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3020
3021 loop_info->store_mems
3022 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3023 }
3024 if (loop_info->unknown_constant_address_altered)
3025 {
3026 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3027 MEM_READONLY_P (mem) = 1;
3028 loop_info->store_mems
3029 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3030 }
3031 }
3032 \f
3033 /* Invalidate all loops containing LABEL. */
3034
3035 static void
3036 invalidate_loops_containing_label (rtx label)
3037 {
3038 struct loop *loop;
3039 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3040 loop->invalid = 1;
3041 }
3042
3043 /* Scan the function looking for loops. Record the start and end of each loop.
3044 Also mark as invalid any loops that contain a setjmp or are branched
3045 to from outside the loop. */
3046
3047 static void
3048 find_and_verify_loops (rtx f, struct loops *loops)
3049 {
3050 rtx insn;
3051 rtx label;
3052 int num_loops;
3053 struct loop *current_loop;
3054 struct loop *next_loop;
3055 struct loop *loop;
3056
3057 num_loops = loops->num;
3058
3059 compute_luids (f, NULL_RTX, 0);
3060
3061 /* If there are jumps to undefined labels,
3062 treat them as jumps out of any/all loops.
3063 This also avoids writing past end of tables when there are no loops. */
3064 uid_loop[0] = NULL;
3065
3066 /* Find boundaries of loops, mark which loops are contained within
3067 loops, and invalidate loops that have setjmp. */
3068
3069 num_loops = 0;
3070 current_loop = NULL;
3071 for (insn = f; insn; insn = NEXT_INSN (insn))
3072 {
3073 if (NOTE_P (insn))
3074 switch (NOTE_LINE_NUMBER (insn))
3075 {
3076 case NOTE_INSN_LOOP_BEG:
3077 next_loop = loops->array + num_loops;
3078 next_loop->num = num_loops;
3079 num_loops++;
3080 next_loop->start = insn;
3081 next_loop->outer = current_loop;
3082 current_loop = next_loop;
3083 break;
3084
3085 case NOTE_INSN_LOOP_END:
3086 gcc_assert (current_loop);
3087
3088 current_loop->end = insn;
3089 current_loop = current_loop->outer;
3090 break;
3091
3092 default:
3093 break;
3094 }
3095
3096 if (CALL_P (insn)
3097 && find_reg_note (insn, REG_SETJMP, NULL))
3098 {
3099 /* In this case, we must invalidate our current loop and any
3100 enclosing loop. */
3101 for (loop = current_loop; loop; loop = loop->outer)
3102 {
3103 loop->invalid = 1;
3104 if (loop_dump_stream)
3105 fprintf (loop_dump_stream,
3106 "\nLoop at %d ignored due to setjmp.\n",
3107 INSN_UID (loop->start));
3108 }
3109 }
3110
3111 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3112 enclosing loop, but this doesn't matter. */
3113 uid_loop[INSN_UID (insn)] = current_loop;
3114 }
3115
3116 /* Any loop containing a label used in an initializer must be invalidated,
3117 because it can be jumped into from anywhere. */
3118 for (label = forced_labels; label; label = XEXP (label, 1))
3119 invalidate_loops_containing_label (XEXP (label, 0));
3120
3121 /* Any loop containing a label used for an exception handler must be
3122 invalidated, because it can be jumped into from anywhere. */
3123 for_each_eh_label (invalidate_loops_containing_label);
3124
3125 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3126 loop that it is not contained within, that loop is marked invalid.
3127 If any INSN or CALL_INSN uses a label's address, then the loop containing
3128 that label is marked invalid, because it could be jumped into from
3129 anywhere.
3130
3131 Also look for blocks of code ending in an unconditional branch that
3132 exits the loop. If such a block is surrounded by a conditional
3133 branch around the block, move the block elsewhere (see below) and
3134 invert the jump to point to the code block. This may eliminate a
3135 label in our loop and will simplify processing by both us and a
3136 possible second cse pass. */
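/* As a rough illustration (labels made up), a sequence such as

      p:      if (COND) goto l_cont;
      insn:   goto l_exit;            <- unconditional jump out of the loop
      l_cont: ...

   is rewritten so that P branches, with inverted condition, straight to
   the relocated block:

      p:      if (! COND) goto l_new;
      l_cont: ...
      ...
      l_new:  goto l_exit;            <- moved after a BARRIER at the
                                         target's loop depth  */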
3137
3138 for (insn = f; insn; insn = NEXT_INSN (insn))
3139 if (INSN_P (insn))
3140 {
3141 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3142
3143 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3144 {
3145 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3146 if (note)
3147 invalidate_loops_containing_label (XEXP (note, 0));
3148 }
3149
3150 if (!JUMP_P (insn))
3151 continue;
3152
3153 mark_loop_jump (PATTERN (insn), this_loop);
3154
3155 /* See if this is an unconditional branch outside the loop. */
3156 if (this_loop
3157 && (GET_CODE (PATTERN (insn)) == RETURN
3158 || (any_uncondjump_p (insn)
3159 && onlyjump_p (insn)
3160 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3161 != this_loop)))
3162 && get_max_uid () < max_uid_for_loop)
3163 {
3164 rtx p;
3165 rtx our_next = next_real_insn (insn);
3166 rtx last_insn_to_move = NEXT_INSN (insn);
3167 struct loop *dest_loop;
3168 struct loop *outer_loop = NULL;
3169
3170 /* Go backwards until we reach the start of the loop, a label,
3171 or a JUMP_INSN. */
3172 for (p = PREV_INSN (insn);
3173 !LABEL_P (p)
3174 && ! (NOTE_P (p)
3175 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3176 && !JUMP_P (p);
3177 p = PREV_INSN (p))
3178 ;
3179
3180 /* Check for the case where we have a jump to an inner nested
3181 loop, and do not perform the optimization in that case. */
3182
3183 if (JUMP_LABEL (insn))
3184 {
3185 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3186 if (dest_loop)
3187 {
3188 for (outer_loop = dest_loop; outer_loop;
3189 outer_loop = outer_loop->outer)
3190 if (outer_loop == this_loop)
3191 break;
3192 }
3193 }
3194
3195 /* Make sure that the target of P is within the current loop. */
3196
3197 if (JUMP_P (p) && JUMP_LABEL (p)
3198 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3199 outer_loop = this_loop;
3200
3201 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3202 we have a block of code to try to move.
3203
3204 We look backward and then forward from the target of INSN
3205 to find a BARRIER at the same loop depth as the target.
3206 If we find such a BARRIER, we make a new label for the start
3207 of the block, invert the jump in P and point it to that label,
3208 and move the block of code to the spot we found. */
3209
3210 if (! outer_loop
3211 && JUMP_P (p)
3212 && JUMP_LABEL (p) != 0
3213 /* Just ignore jumps to labels that were never emitted.
3214 These always indicate compilation errors. */
3215 && INSN_UID (JUMP_LABEL (p)) != 0
3216 && any_condjump_p (p) && onlyjump_p (p)
3217 && next_real_insn (JUMP_LABEL (p)) == our_next
3218 /* If it's not safe to move the sequence, then we
3219 mustn't try. */
3220 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3221 &last_insn_to_move))
3222 {
3223 rtx target
3224 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3225 struct loop *target_loop = uid_loop[INSN_UID (target)];
3226 rtx loc, loc2;
3227 rtx tmp;
3228
3229 /* Search for possible garbage past the conditional jumps
3230 and look for the last barrier. */
3231 for (tmp = last_insn_to_move;
3232 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3233 if (BARRIER_P (tmp))
3234 last_insn_to_move = tmp;
3235
3236 for (loc = target; loc; loc = PREV_INSN (loc))
3237 if (BARRIER_P (loc)
3238 /* Don't move things inside a tablejump. */
3239 && ((loc2 = next_nonnote_insn (loc)) == 0
3240 || !LABEL_P (loc2)
3241 || (loc2 = next_nonnote_insn (loc2)) == 0
3242 || !JUMP_P (loc2)
3243 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3244 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3245 && uid_loop[INSN_UID (loc)] == target_loop)
3246 break;
3247
3248 if (loc == 0)
3249 for (loc = target; loc; loc = NEXT_INSN (loc))
3250 if (BARRIER_P (loc)
3251 /* Don't move things inside a tablejump. */
3252 && ((loc2 = next_nonnote_insn (loc)) == 0
3253 || !LABEL_P (loc2)
3254 || (loc2 = next_nonnote_insn (loc2)) == 0
3255 || !JUMP_P (loc2)
3256 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3257 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3258 && uid_loop[INSN_UID (loc)] == target_loop)
3259 break;
3260
3261 if (loc)
3262 {
3263 rtx cond_label = JUMP_LABEL (p);
3264 rtx new_label = get_label_after (p);
3265
3266 /* Ensure our label doesn't go away. */
3267 LABEL_NUSES (cond_label)++;
3268
3269 /* Verify that uid_loop is large enough and that
3270 we can invert P. */
3271 if (invert_jump (p, new_label, 1))
3272 {
3273 rtx q, r;
3274 bool only_notes;
3275
3276 /* If no suitable BARRIER was found, create a suitable
3277 one before TARGET. Since TARGET is a fall through
3278 path, we'll need to insert a jump around our block
3279 and add a BARRIER before TARGET.
3280
3281 This creates an extra unconditional jump outside
3282 the loop. However, the benefits of removing rarely
3283 executed instructions from inside the loop usually
3284 outweighs the cost of the extra unconditional jump
3285 outside the loop. */
3286 if (loc == 0)
3287 {
3288 rtx temp;
3289
3290 temp = gen_jump (JUMP_LABEL (insn));
3291 temp = emit_jump_insn_before (temp, target);
3292 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3293 LABEL_NUSES (JUMP_LABEL (insn))++;
3294 loc = emit_barrier_before (target);
3295 }
3296
3297 /* Include the BARRIER after INSN and copy the
3298 block after LOC. */
3299 only_notes = squeeze_notes (&new_label,
3300 &last_insn_to_move);
3301 gcc_assert (!only_notes);
3302
3303 reorder_insns (new_label, last_insn_to_move, loc);
3304
3305 /* All those insns are now in TARGET_LOOP. */
3306 for (q = new_label;
3307 q != NEXT_INSN (last_insn_to_move);
3308 q = NEXT_INSN (q))
3309 uid_loop[INSN_UID (q)] = target_loop;
3310
3311 /* The label jumped to by INSN is no longer a loop
3312 exit. Unless INSN does not have a label (e.g.,
3313 it is a RETURN insn), search loop->exit_labels
3314 to find its label_ref, and remove it. Also turn
3315 off LABEL_OUTSIDE_LOOP_P bit. */
3316 if (JUMP_LABEL (insn))
3317 {
3318 for (q = 0, r = this_loop->exit_labels;
3319 r;
3320 q = r, r = LABEL_NEXTREF (r))
3321 if (XEXP (r, 0) == JUMP_LABEL (insn))
3322 {
3323 LABEL_OUTSIDE_LOOP_P (r) = 0;
3324 if (q)
3325 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3326 else
3327 this_loop->exit_labels = LABEL_NEXTREF (r);
3328 break;
3329 }
3330
3331 for (loop = this_loop; loop && loop != target_loop;
3332 loop = loop->outer)
3333 loop->exit_count--;
3334
3335 /* If we didn't find it, then something is
3336 wrong. */
3337 gcc_assert (r);
3338 }
3339
3340 /* P is now a jump outside the loop, so it must be put
3341 in loop->exit_labels, and marked as such.
3342 The easiest way to do this is to just call
3343 mark_loop_jump again for P. */
3344 mark_loop_jump (PATTERN (p), this_loop);
3345
3346 /* If INSN now jumps to the insn after it,
3347 delete INSN. */
3348 if (JUMP_LABEL (insn) != 0
3349 && (next_real_insn (JUMP_LABEL (insn))
3350 == next_real_insn (insn)))
3351 delete_related_insns (insn);
3352 }
3353
3354 /* Continue the loop after where the conditional
3355 branch used to jump, since the only branch insn
3356 in the block (if it still remains) is an inter-loop
3357 branch and hence needs no processing. */
3358 insn = NEXT_INSN (cond_label);
3359
3360 if (--LABEL_NUSES (cond_label) == 0)
3361 delete_related_insns (cond_label);
3362
3363 /* This loop will be continued with NEXT_INSN (insn). */
3364 insn = PREV_INSN (insn);
3365 }
3366 }
3367 }
3368 }
3369 }
3370
3371 /* If any label in X jumps to a loop different from LOOP and any of the
3372 loops it is contained in, mark the target loop invalid.
3373
3374 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3375
3376 static void
3377 mark_loop_jump (rtx x, struct loop *loop)
3378 {
3379 struct loop *dest_loop;
3380 struct loop *outer_loop;
3381 int i;
3382
3383 switch (GET_CODE (x))
3384 {
3385 case PC:
3386 case USE:
3387 case CLOBBER:
3388 case REG:
3389 case MEM:
3390 case CONST_INT:
3391 case CONST_DOUBLE:
3392 case RETURN:
3393 return;
3394
3395 case CONST:
3396 /* There could be a label reference in here. */
3397 mark_loop_jump (XEXP (x, 0), loop);
3398 return;
3399
3400 case PLUS:
3401 case MINUS:
3402 case MULT:
3403 mark_loop_jump (XEXP (x, 0), loop);
3404 mark_loop_jump (XEXP (x, 1), loop);
3405 return;
3406
3407 case LO_SUM:
3408 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3409 mark_loop_jump (XEXP (x, 1), loop);
3410 return;
3411
3412 case SIGN_EXTEND:
3413 case ZERO_EXTEND:
3414 mark_loop_jump (XEXP (x, 0), loop);
3415 return;
3416
3417 case LABEL_REF:
3418 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3419
3420 /* Link together all labels that branch outside the loop. This
3421 is used by final_[bg]iv_value and the loop unrolling code. Also
3422 mark this LABEL_REF so we know that this branch should predict
3423 false. */
3424
3425 /* A check to make sure the label is not in an inner nested loop,
3426 since this does not count as a loop exit. */
3427 if (dest_loop)
3428 {
3429 for (outer_loop = dest_loop; outer_loop;
3430 outer_loop = outer_loop->outer)
3431 if (outer_loop == loop)
3432 break;
3433 }
3434 else
3435 outer_loop = NULL;
3436
3437 if (loop && ! outer_loop)
3438 {
3439 LABEL_OUTSIDE_LOOP_P (x) = 1;
3440 LABEL_NEXTREF (x) = loop->exit_labels;
3441 loop->exit_labels = x;
3442
3443 for (outer_loop = loop;
3444 outer_loop && outer_loop != dest_loop;
3445 outer_loop = outer_loop->outer)
3446 outer_loop->exit_count++;
3447 }
3448
3449 /* If this is inside a loop, but not in the current loop or one enclosed
3450 by it, it invalidates at least one loop. */
3451
3452 if (! dest_loop)
3453 return;
3454
3455 /* We must invalidate every nested loop containing the target of this
3456 label, except those that also contain the jump insn. */
3457
3458 for (; dest_loop; dest_loop = dest_loop->outer)
3459 {
3460 /* Stop when we reach a loop that also contains the jump insn. */
3461 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3462 if (dest_loop == outer_loop)
3463 return;
3464
3465 /* If we get here, we know we need to invalidate a loop. */
3466 if (loop_dump_stream && ! dest_loop->invalid)
3467 fprintf (loop_dump_stream,
3468 "\nLoop at %d ignored due to multiple entry points.\n",
3469 INSN_UID (dest_loop->start));
3470
3471 dest_loop->invalid = 1;
3472 }
3473 return;
3474
3475 case SET:
3476 /* If this is not setting pc, ignore. */
3477 if (SET_DEST (x) == pc_rtx)
3478 mark_loop_jump (SET_SRC (x), loop);
3479 return;
3480
3481 case IF_THEN_ELSE:
3482 mark_loop_jump (XEXP (x, 1), loop);
3483 mark_loop_jump (XEXP (x, 2), loop);
3484 return;
3485
3486 case PARALLEL:
3487 case ADDR_VEC:
3488 for (i = 0; i < XVECLEN (x, 0); i++)
3489 mark_loop_jump (XVECEXP (x, 0, i), loop);
3490 return;
3491
3492 case ADDR_DIFF_VEC:
3493 for (i = 0; i < XVECLEN (x, 1); i++)
3494 mark_loop_jump (XVECEXP (x, 1, i), loop);
3495 return;
3496
3497 default:
3498 /* Strictly speaking this is not a jump into the loop, only a possible
3499 jump out of the loop. However, we have no way to link the destination
3500 of this jump onto the list of exit labels. To be safe we mark this
3501 loop and any containing loops as invalid. */
3502 if (loop)
3503 {
3504 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3505 {
3506 if (loop_dump_stream && ! outer_loop->invalid)
3507 fprintf (loop_dump_stream,
3508 "\nLoop at %d ignored due to unknown exit jump.\n",
3509 INSN_UID (outer_loop->start));
3510 outer_loop->invalid = 1;
3511 }
3512 }
3513 return;
3514 }
3515 }
3516 \f
3517 /* Return nonzero if there is a label in the range from
3518 insn INSN to and including the insn whose luid is END.
3519 INSN must have an assigned luid (i.e., it must not have
3520 been previously created by loop.c). */
3521
3522 static int
3523 labels_in_range_p (rtx insn, int end)
3524 {
3525 while (insn && INSN_LUID (insn) <= end)
3526 {
3527 if (LABEL_P (insn))
3528 return 1;
3529 insn = NEXT_INSN (insn);
3530 }
3531
3532 return 0;
3533 }
3534
3535 /* Record that a memory reference X is being set. */
3536
3537 static void
3538 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3539 void *data ATTRIBUTE_UNUSED)
3540 {
3541 struct loop_info *loop_info = data;
3542
3543 if (x == 0 || !MEM_P (x))
3544 return;
3545
3546 /* Count number of memory writes.
3547 This affects heuristics in strength_reduce. */
3548 loop_info->num_mem_sets++;
3549
3550 /* BLKmode MEM means all memory is clobbered. */
3551 if (GET_MODE (x) == BLKmode)
3552 {
3553 if (MEM_READONLY_P (x))
3554 loop_info->unknown_constant_address_altered = 1;
3555 else
3556 loop_info->unknown_address_altered = 1;
3557
3558 return;
3559 }
3560
3561 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3562 loop_info->store_mems);
3563 }
3564
3565 /* X is a value modified by an INSN that references a biv inside a loop
3566 exit test (i.e., X is somehow related to the value of the biv). If X
3567 is a pseudo that is used more than once, then the biv is (effectively)
3568 used more than once. DATA is a pointer to a loop_regs structure. */
3569
3570 static void
3571 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3572 {
3573 struct loop_regs *regs = (struct loop_regs *) data;
3574
3575 if (x == 0)
3576 return;
3577
3578 while (GET_CODE (x) == STRICT_LOW_PART
3579 || GET_CODE (x) == SIGN_EXTRACT
3580 || GET_CODE (x) == ZERO_EXTRACT
3581 || GET_CODE (x) == SUBREG)
3582 x = XEXP (x, 0);
3583
3584 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3585 return;
3586
3587 /* If we do not have usage information, or if we know the register
3588 is used more than once, note that fact for check_dbra_loop. */
3589 if (REGNO (x) >= max_reg_before_loop
3590 || ! regs->array[REGNO (x)].single_usage
3591 || regs->array[REGNO (x)].single_usage == const0_rtx)
3592 regs->multiple_uses = 1;
3593 }
3594 \f
3595 /* Return nonzero if the rtx X is invariant over the current loop.
3596
3597 The value is 2 if we refer to something only conditionally invariant.
3598
3599 A memory ref is invariant if it is not volatile and does not conflict
3600 with anything stored in `loop_info->store_mems'. */
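
/* Editorial example (not in the original sources, and ignoring the
   special cases for hard registers): for the expression
   (plus (reg 100) (const_int 4)) the CONST_INT is trivially
   invariant, so the result depends on reg 100 alone: 0 if it is
   set unconditionally inside the loop, 2 if it is only
   conditionally invariant, and 1 if it is not set in the loop
   at all. */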
3601
3602 static int
3603 loop_invariant_p (const struct loop *loop, rtx x)
3604 {
3605 struct loop_info *loop_info = LOOP_INFO (loop);
3606 struct loop_regs *regs = LOOP_REGS (loop);
3607 int i;
3608 enum rtx_code code;
3609 const char *fmt;
3610 int conditional = 0;
3611 rtx mem_list_entry;
3612
3613 if (x == 0)
3614 return 1;
3615 code = GET_CODE (x);
3616 switch (code)
3617 {
3618 case CONST_INT:
3619 case CONST_DOUBLE:
3620 case SYMBOL_REF:
3621 case CONST:
3622 return 1;
3623
3624 case LABEL_REF:
3625 return 1;
3626
3627 case PC:
3628 case CC0:
3629 case UNSPEC_VOLATILE:
3630 return 0;
3631
3632 case REG:
3633 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3634 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3635 && ! current_function_has_nonlocal_goto)
3636 return 1;
3637
3638 if (LOOP_INFO (loop)->has_call
3639 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3640 return 0;
3641
3642 /* Out-of-range regs can occur when we are called from unrolling.
3643 These registers, created by the unroller, are set in the loop
3644 and hence are never invariant.
3645 Other out-of-range regs can be generated by load_mems; those that
3646 are written to in the loop are not invariant, while those that are
3647 not written to are invariant. It would be easy for load_mems
3648 to set n_times_set correctly for these registers, however, there
3649 is no easy way to distinguish them from registers created by the
3650 unroller. */
3651
3652 if (REGNO (x) >= (unsigned) regs->num)
3653 return 0;
3654
3655 if (regs->array[REGNO (x)].set_in_loop < 0)
3656 return 2;
3657
3658 return regs->array[REGNO (x)].set_in_loop == 0;
3659
3660 case MEM:
3661 /* Volatile memory references must be rejected. Do this before
3662 checking for read-only items, so that volatile read-only items
3663 will be rejected also. */
3664 if (MEM_VOLATILE_P (x))
3665 return 0;
3666
3667 /* See if there is any dependence between a store and this load. */
3668 mem_list_entry = loop_info->store_mems;
3669 while (mem_list_entry)
3670 {
3671 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3672 x, rtx_varies_p))
3673 return 0;
3674
3675 mem_list_entry = XEXP (mem_list_entry, 1);
3676 }
3677
3678 /* It's not invalidated by a store in memory
3679 but we must still verify the address is invariant. */
3680 break;
3681
3682 case ASM_OPERANDS:
3683 /* Don't mess with insns declared volatile. */
3684 if (MEM_VOLATILE_P (x))
3685 return 0;
3686 break;
3687
3688 default:
3689 break;
3690 }
3691
3692 fmt = GET_RTX_FORMAT (code);
3693 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3694 {
3695 if (fmt[i] == 'e')
3696 {
3697 int tem = loop_invariant_p (loop, XEXP (x, i));
3698 if (tem == 0)
3699 return 0;
3700 if (tem == 2)
3701 conditional = 1;
3702 }
3703 else if (fmt[i] == 'E')
3704 {
3705 int j;
3706 for (j = 0; j < XVECLEN (x, i); j++)
3707 {
3708 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3709 if (tem == 0)
3710 return 0;
3711 if (tem == 2)
3712 conditional = 1;
3713 }
3714
3715 }
3716 }
3717
3718 return 1 + conditional;
3719 }
3720 \f
3721 /* Return nonzero if all the insns in the loop that set REG
3722 are INSN and the immediately following insns,
3723 and if each of those insns sets REG in an invariant way
3724 (not counting uses of REG in them).
3725
3726 The value is 2 if some of these insns are only conditionally invariant.
3727
3728 We assume that INSN itself is the first set of REG
3729 and that its source is invariant. */
3730
3731 static int
3732 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3733 rtx insn)
3734 {
3735 struct loop_regs *regs = LOOP_REGS (loop);
3736 rtx p = insn;
3737 unsigned int regno = REGNO (reg);
3738 rtx temp;
3739 /* Number of sets we have to insist on finding after INSN. */
3740 int count = n_sets - 1;
3741 int old = regs->array[regno].set_in_loop;
3742 int value = 0;
3743 int this;
3744
3745 /* If N_SETS hit the limit, we can't rely on its value. */
3746 if (n_sets == 127)
3747 return 0;
3748
3749 regs->array[regno].set_in_loop = 0;
3750
3751 while (count > 0)
3752 {
3753 enum rtx_code code;
3754 rtx set;
3755
3756 p = NEXT_INSN (p);
3757 code = GET_CODE (p);
3758
3759 /* If library call, skip to end of it. */
3760 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3761 p = XEXP (temp, 0);
3762
3763 this = 0;
3764 if (code == INSN
3765 && (set = single_set (p))
3766 && REG_P (SET_DEST (set))
3767 && REGNO (SET_DEST (set)) == regno)
3768 {
3769 this = loop_invariant_p (loop, SET_SRC (set));
3770 if (this != 0)
3771 value |= this;
3772 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3773 {
3774 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3775 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3776 notes are OK. */
3777 this = (CONSTANT_P (XEXP (temp, 0))
3778 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3779 && loop_invariant_p (loop, XEXP (temp, 0))));
3780 if (this != 0)
3781 value |= this;
3782 }
3783 }
3784 if (this != 0)
3785 count--;
3786 else if (code != NOTE)
3787 {
3788 regs->array[regno].set_in_loop = old;
3789 return 0;
3790 }
3791 }
3792
3793 regs->array[regno].set_in_loop = old;
3794 /* If loop_invariant_p ever returned 2, we return 2. */
3795 return 1 + (value & 2);
3796 }
3797 \f
3798 /* Look at all uses (not sets) of registers in X. For each, if it is
3799 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3800 a different insn, set USAGE[REGNO] to const0_rtx. */
3801
3802 static void
3803 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3804 {
3805 enum rtx_code code = GET_CODE (x);
3806 const char *fmt = GET_RTX_FORMAT (code);
3807 int i, j;
3808
3809 if (code == REG)
3810 regs->array[REGNO (x)].single_usage
3811 = (regs->array[REGNO (x)].single_usage != 0
3812 && regs->array[REGNO (x)].single_usage != insn)
3813 ? const0_rtx : insn;
3814
3815 else if (code == SET)
3816 {
3817 /* Don't count SET_DEST if it is a REG; otherwise count things
3818 in SET_DEST because if a register is partially modified, it won't
3819 show up as a potential movable so we don't care how USAGE is set
3820 for it. */
3821 if (!REG_P (SET_DEST (x)))
3822 find_single_use_in_loop (regs, insn, SET_DEST (x));
3823 find_single_use_in_loop (regs, insn, SET_SRC (x));
3824 }
3825 else
3826 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3827 {
3828 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3829 find_single_use_in_loop (regs, insn, XEXP (x, i));
3830 else if (fmt[i] == 'E')
3831 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3832 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3833 }
3834 }
3835 \f
3836 /* Count and record any set in X which is contained in INSN. Update
3837 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3838 in X. */
3839
3840 static void
3841 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3842 {
3843 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3844 /* Don't move a reg that has an explicit clobber.
3845 It's not worth the pain to try to do it correctly. */
3846 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3847
3848 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3849 {
3850 rtx dest = SET_DEST (x);
3851 while (GET_CODE (dest) == SUBREG
3852 || GET_CODE (dest) == ZERO_EXTRACT
3853 || GET_CODE (dest) == STRICT_LOW_PART)
3854 dest = XEXP (dest, 0);
3855 if (REG_P (dest))
3856 {
3857 int i;
3858 int regno = REGNO (dest);
3859 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3860 {
3861 /* If this is the first setting of this reg
3862 in current basic block, and it was set before,
3863 it must be set in two basic blocks, so it cannot
3864 be moved out of the loop. */
3865 if (regs->array[regno].set_in_loop > 0
3866 && last_set[regno] == 0)
3867 regs->array[regno+i].may_not_optimize = 1;
3868 /* If this is not first setting in current basic block,
3869 see if reg was used in between previous one and this.
3870 If so, neither one can be moved. */
3871 if (last_set[regno] != 0
3872 && reg_used_between_p (dest, last_set[regno], insn))
3873 regs->array[regno+i].may_not_optimize = 1;
3874 if (regs->array[regno+i].set_in_loop < 127)
3875 ++regs->array[regno+i].set_in_loop;
3876 last_set[regno+i] = insn;
3877 }
3878 }
3879 }
3880 }
3881 \f
3882 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3883 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3884 contained in insn INSN is used by any insn that precedes INSN in
3885 cyclic order starting from the loop entry point.
3886
3887 We don't want to use INSN_LUID here because if we restrict INSN to those
3888 that have a valid INSN_LUID, it means we cannot move an invariant out
3889 from an inner loop past two loops. */
3890
3891 static int
3892 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3893 {
3894 rtx reg = SET_DEST (set);
3895 rtx p;
3896
3897 /* Scan forward checking for register usage. If we hit INSN, we
3898 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3899 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3900 {
3901 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3902 return 1;
3903
3904 if (p == loop->end)
3905 p = loop->start;
3906 }
3907
3908 return 0;
3909 }
3910 \f
3911
3912 /* Information we collect about arrays that we might want to prefetch. */
3913 struct prefetch_info
3914 {
3915 struct iv_class *class; /* Class this prefetch is based on. */
3916 struct induction *giv; /* GIV this prefetch is based on. */
3917 rtx base_address; /* Start prefetching from this address plus
3918 index. */
3919 HOST_WIDE_INT index;
3920 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3921 iteration. */
3922 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3923 prefetch area in one iteration. */
3924 unsigned int total_bytes; /* Total bytes loop will access in this block.
3925 This is set only for loops with known
3926 iteration counts and is 0xffffffff
3927 otherwise. */
3928 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3929 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3930 unsigned int write : 1; /* 1 for read/write prefetches. */
3931 };
3932
3933 /* Data used by check_store function. */
3934 struct check_store_data
3935 {
3936 rtx mem_address;
3937 int mem_write;
3938 };
3939
3940 static void check_store (rtx, rtx, void *);
3941 static void emit_prefetch_instructions (struct loop *);
3942 static int rtx_equal_for_prefetch_p (rtx, rtx);
3943
3944 /* Set mem_write when mem_address is found. Used as callback to
3945 note_stores. */
3946 static void
3947 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3948 {
3949 struct check_store_data *d = (struct check_store_data *) data;
3950
3951 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3952 d->mem_write = 1;
3953 }
3954 \f
3955 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3956 important to get some addresses combined. Later more sophisticated
3957 transformations can be added when necessary.
3958
3959 ??? The same trick of swapping operands is done in several other places.
3960 It would be nice to develop some common way to handle this. */
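
/* Editorial example (not in the original sources):
   (plus (reg 100) (reg 101)) compares equal to
   (plus (reg 101) (reg 100)) here, although rtx_equal_p would
   reject the pair because the operand order differs. */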
3961
3962 static int
3963 rtx_equal_for_prefetch_p (rtx x, rtx y)
3964 {
3965 int i;
3966 int j;
3967 enum rtx_code code = GET_CODE (x);
3968 const char *fmt;
3969
3970 if (x == y)
3971 return 1;
3972 if (code != GET_CODE (y))
3973 return 0;
3974
3975 if (GET_MODE (x) != GET_MODE (y))
3976 return 0;
3977
3978 switch (code)
3979 {
3980 case PC:
3981 case CC0:
3982 case CONST_INT:
3983 case CONST_DOUBLE:
3984 return 0;
3985
3986 case LABEL_REF:
3987 return XEXP (x, 0) == XEXP (y, 0);
3988
3989 default:
3990 break;
3991 }
3992
3993 if (COMMUTATIVE_ARITH_P (x))
3994 {
3995 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3996 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3997 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3998 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3999 }
4000
4001 /* Compare the elements. If any pair of corresponding elements fails to
4002 match, return 0 for the whole thing. */
4003
4004 fmt = GET_RTX_FORMAT (code);
4005 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4006 {
4007 switch (fmt[i])
4008 {
4009 case 'w':
4010 if (XWINT (x, i) != XWINT (y, i))
4011 return 0;
4012 break;
4013
4014 case 'i':
4015 if (XINT (x, i) != XINT (y, i))
4016 return 0;
4017 break;
4018
4019 case 'E':
4020 /* Two vectors must have the same length. */
4021 if (XVECLEN (x, i) != XVECLEN (y, i))
4022 return 0;
4023
4024 /* And the corresponding elements must match. */
4025 for (j = 0; j < XVECLEN (x, i); j++)
4026 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4027 XVECEXP (y, i, j)) == 0)
4028 return 0;
4029 break;
4030
4031 case 'e':
4032 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4033 return 0;
4034 break;
4035
4036 case 's':
4037 if (strcmp (XSTR (x, i), XSTR (y, i)))
4038 return 0;
4039 break;
4040
4041 case 'u':
4042 /* These are just backpointers, so they don't matter. */
4043 break;
4044
4045 case '0':
4046 break;
4047
4048 /* It is believed that rtx's at this level will never
4049 contain anything but integers and other rtx's,
4050 except for within LABEL_REFs and SYMBOL_REFs. */
4051 default:
4052 gcc_unreachable ();
4053 }
4054 }
4055 return 1;
4056 }
4057 \f
4058 /* Remove constant addition value from the expression X (when present)
4059 and return it. */
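
/* Editorial examples (not in the original sources): for
   (plus (reg 100) (const_int 16)) the expression becomes (reg 100)
   and 16 is returned; for
   (const (plus (symbol_ref "a") (const_int 8))) the expression
   becomes (symbol_ref "a") and 8 is returned. */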
4060
4061 static HOST_WIDE_INT
4062 remove_constant_addition (rtx *x)
4063 {
4064 HOST_WIDE_INT addval = 0;
4065 rtx exp = *x;
4066
4067 /* Avoid clobbering a shared CONST expression. */
4068 if (GET_CODE (exp) == CONST)
4069 {
4070 if (GET_CODE (XEXP (exp, 0)) == PLUS
4071 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4072 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4073 {
4074 *x = XEXP (XEXP (exp, 0), 0);
4075 return INTVAL (XEXP (XEXP (exp, 0), 1));
4076 }
4077 return 0;
4078 }
4079
4080 if (GET_CODE (exp) == CONST_INT)
4081 {
4082 addval = INTVAL (exp);
4083 *x = const0_rtx;
4084 }
4085
4086 /* For a PLUS expression, recurse on both operands. */
4087 else if (GET_CODE (exp) == PLUS)
4088 {
4089 addval += remove_constant_addition (&XEXP (exp, 0));
4090 addval += remove_constant_addition (&XEXP (exp, 1));
4091
4092 /* If either operand was a constant, remove the resulting extra zero
4093 from the expression. */
4094 if (XEXP (exp, 0) == const0_rtx)
4095 *x = XEXP (exp, 1);
4096 else if (XEXP (exp, 1) == const0_rtx)
4097 *x = XEXP (exp, 0);
4098 }
4099
4100 return addval;
4101 }
4102
4103 /* Attempt to identify accesses to arrays that are most likely to cause cache
4104 misses, and emit prefetch instructions a few prefetch blocks forward.
4105
4106 To detect the arrays we use the GIV information that was collected by the
4107 strength reduction pass.
4108
4109 The prefetch instructions are generated after the GIV information is done
4110 and before the strength reduction process. The new GIVs are injected into
4111 the strength reduction tables, so the prefetch addresses are optimized as
4112 well.
4113
4114 GIVs are split into base address, stride, and constant addition values.
4115 GIVs with the same address, stride and close addition values are combined
4116 into a single prefetch. Also writes to GIVs are detected, so that prefetch
4117 for write instructions can be used for the block we write to, on machines
4118 that support write prefetches.
4119
4120 Several heuristics are used to determine when to prefetch. They are
4121 controlled by defined symbols that can be overridden for each target. */
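
/* Editorial example (not in the original sources): in a source loop
   such as

       for (i = 0; i < n; i++)
         sum += a[i];

   the address of a[i] is a DEST_ADDR giv whose stride per iteration
   is the element size (assuming the biv steps by one element).
   Subject to the heuristics below, a prefetch of the cache line
   roughly AHEAD * PREFETCH_BLOCK bytes past the current access is
   emitted inside the loop, and a few prefetches of the first cache
   lines are emitted before the loop. */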
4122
4123 static void
4124 emit_prefetch_instructions (struct loop *loop)
4125 {
4126 int num_prefetches = 0;
4127 int num_real_prefetches = 0;
4128 int num_real_write_prefetches = 0;
4129 int num_prefetches_before = 0;
4130 int num_write_prefetches_before = 0;
4131 int ahead = 0;
4132 int i;
4133 struct iv_class *bl;
4134 struct induction *iv;
4135 struct prefetch_info info[MAX_PREFETCHES];
4136 struct loop_ivs *ivs = LOOP_IVS (loop);
4137
4138 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4139 return;
4140
4141 /* Consider only loops without calls. When a call is made, the loop is
4142 probably slow enough to read the memory without prefetching. */
4143 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4144 {
4145 if (loop_dump_stream)
4146 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4147
4148 return;
4149 }
4150
4151 /* Don't prefetch in loops known to have few iterations. */
4152 if (PREFETCH_NO_LOW_LOOPCNT
4153 && LOOP_INFO (loop)->n_iterations
4154 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4155 {
4156 if (loop_dump_stream)
4157 fprintf (loop_dump_stream,
4158 "Prefetch: ignoring loop: not enough iterations.\n");
4159 return;
4160 }
4161
4162 /* Search all induction variables and pick those interesting for the prefetch
4163 machinery. */
4164 for (bl = ivs->list; bl; bl = bl->next)
4165 {
4166 struct induction *biv = bl->biv, *biv1;
4167 int basestride = 0;
4168
4169 biv1 = biv;
4170
4171 /* Expect all BIVs to be executed in each iteration. This makes our
4172 analysis more conservative. */
4173 while (biv1)
4174 {
4175 /* Discard non-constant additions that we can't handle well yet, and
4176 BIVs that are executed multiple times; such BIVs ought to be
4177 handled in the nested loop. We accept not_every_iteration BIVs,
4178 since these only result in larger strides and make our
4179 heuristics more conservative. */
4180 if (GET_CODE (biv->add_val) != CONST_INT)
4181 {
4182 if (loop_dump_stream)
4183 {
4184 fprintf (loop_dump_stream,
4185 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4186 REGNO (biv->src_reg), INSN_UID (biv->insn));
4187 print_rtl (loop_dump_stream, biv->add_val);
4188 fprintf (loop_dump_stream, "\n");
4189 }
4190 break;
4191 }
4192
4193 if (biv->maybe_multiple)
4194 {
4195 if (loop_dump_stream)
4196 {
4197 fprintf (loop_dump_stream,
4198 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4199 REGNO (biv->src_reg), INSN_UID (biv->insn));
4200 print_rtl (loop_dump_stream, biv->add_val);
4201 fprintf (loop_dump_stream, "\n");
4202 }
4203 break;
4204 }
4205
4206 basestride += INTVAL (biv1->add_val);
4207 biv1 = biv1->next_iv;
4208 }
4209
4210 if (biv1 || !basestride)
4211 continue;
4212
4213 for (iv = bl->giv; iv; iv = iv->next_iv)
4214 {
4215 rtx address;
4216 rtx temp;
4217 HOST_WIDE_INT index = 0;
4218 int add = 1;
4219 HOST_WIDE_INT stride = 0;
4220 int stride_sign = 1;
4221 struct check_store_data d;
4222 const char *ignore_reason = NULL;
4223 int size = GET_MODE_SIZE (GET_MODE (iv));
4224
4225 /* See whether an induction variable is interesting to us and if
4226 not, report the reason. */
4227 if (iv->giv_type != DEST_ADDR)
4228 ignore_reason = "giv is not a destination address";
4229
4230 /* We are interested only in constant stride memory references
4231 in order to be able to compute density easily. */
4232 else if (GET_CODE (iv->mult_val) != CONST_INT)
4233 ignore_reason = "stride is not constant";
4234
4235 else
4236 {
4237 stride = INTVAL (iv->mult_val) * basestride;
4238 if (stride < 0)
4239 {
4240 stride = -stride;
4241 stride_sign = -1;
4242 }
4243
4244 /* On some targets, reversed order prefetches are not
4245 worthwhile. */
4246 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4247 ignore_reason = "reversed order stride";
4248
4249 /* Prefetch of accesses with an extreme stride might not be
4250 worthwhile, either. */
4251 else if (PREFETCH_NO_EXTREME_STRIDE
4252 && stride > PREFETCH_EXTREME_STRIDE)
4253 ignore_reason = "extreme stride";
4254
4255 /* Ignore GIVs with varying add values; we can't predict the
4256 value for the next iteration. */
4257 else if (!loop_invariant_p (loop, iv->add_val))
4258 ignore_reason = "giv has varying add value";
4259
4260 /* Ignore GIVs in the nested loops; they ought to have been
4261 handled already. */
4262 else if (iv->maybe_multiple)
4263 ignore_reason = "giv is in nested loop";
4264 }
4265
4266 if (ignore_reason != NULL)
4267 {
4268 if (loop_dump_stream)
4269 fprintf (loop_dump_stream,
4270 "Prefetch: ignoring giv at %d: %s.\n",
4271 INSN_UID (iv->insn), ignore_reason);
4272 continue;
4273 }
4274
4275 /* Determine the pointer to the basic array we are examining. It is
4276 the sum of the BIV's initial value and the GIV's add_val. */
4277 address = copy_rtx (iv->add_val);
4278 temp = copy_rtx (bl->initial_value);
4279
4280 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4281 index = remove_constant_addition (&address);
4282
4283 d.mem_write = 0;
4284 d.mem_address = *iv->location;
4285
4286 /* When the GIV is not always executed, we might be better off by
4287 not dirtying the cache pages. */
4288 if (PREFETCH_CONDITIONAL || iv->always_executed)
4289 note_stores (PATTERN (iv->insn), check_store, &d);
4290 else
4291 {
4292 if (loop_dump_stream)
4293 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4294 INSN_UID (iv->insn), "in conditional code.");
4295 continue;
4296 }
4297
4298 /* Attempt to find another prefetch to the same array and see if we
4299 can merge this one. */
4300 for (i = 0; i < num_prefetches; i++)
4301 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4302 && stride == info[i].stride)
4303 {
4304 /* If both accesses are to the same array (the same location,
4305 just with a small difference in constant indexes), merge
4306 the prefetches. Just keep the later one; the earlier one will
4307 get prefetched from the previous iteration.
4308 The artificial threshold should not be too small,
4309 but also not bigger than the small portion of memory usually
4310 traversed by a single loop. */
4311 if (index >= info[i].index
4312 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4313 {
4314 info[i].write |= d.mem_write;
4315 info[i].bytes_accessed += size;
4316 info[i].index = index;
4317 info[i].giv = iv;
4318 info[i].class = bl;
4319 info[num_prefetches].base_address = address;
4320 add = 0;
4321 break;
4322 }
4323
4324 if (index < info[i].index
4325 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4326 {
4327 info[i].write |= d.mem_write;
4328 info[i].bytes_accessed += size;
4329 add = 0;
4330 break;
4331 }
4332 }
4333
4334 /* Merging failed. */
4335 if (add)
4336 {
4337 info[num_prefetches].giv = iv;
4338 info[num_prefetches].class = bl;
4339 info[num_prefetches].index = index;
4340 info[num_prefetches].stride = stride;
4341 info[num_prefetches].base_address = address;
4342 info[num_prefetches].write = d.mem_write;
4343 info[num_prefetches].bytes_accessed = size;
4344 num_prefetches++;
4345 if (num_prefetches >= MAX_PREFETCHES)
4346 {
4347 if (loop_dump_stream)
4348 fprintf (loop_dump_stream,
4349 "Maximal number of prefetches exceeded.\n");
4350 return;
4351 }
4352 }
4353 }
4354 }
4355
4356 for (i = 0; i < num_prefetches; i++)
4357 {
4358 int density;
4359
4360 /* Attempt to calculate the total number of bytes fetched by all
4361 iterations of the loop. Avoid overflow. */
4362 if (LOOP_INFO (loop)->n_iterations
4363 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4364 >= LOOP_INFO (loop)->n_iterations))
4365 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4366 else
4367 info[i].total_bytes = 0xffffffff;
4368
4369 density = info[i].bytes_accessed * 100 / info[i].stride;
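
      /* Editorial example (not in the original sources): with a single
         4-byte access per iteration and a stride of 16 bytes, the
         density computed above is 25, i.e. 25% of each stride's bytes
         are actually touched. */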
4370
4371 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4372 if (PREFETCH_ONLY_DENSE_MEM)
4373 if (density * 256 > PREFETCH_DENSE_MEM * 100
4374 && (info[i].total_bytes / PREFETCH_BLOCK
4375 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4376 {
4377 info[i].prefetch_before_loop = 1;
4378 info[i].prefetch_in_loop
4379 = (info[i].total_bytes / PREFETCH_BLOCK
4380 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4381 }
4382 else
4383 {
4384 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4385 if (loop_dump_stream)
4386 fprintf (loop_dump_stream,
4387 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4388 INSN_UID (info[i].giv->insn), density);
4389 }
4390 else
4391 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4392
4393 /* Find how many prefetch instructions we'll use within the loop. */
4394 if (info[i].prefetch_in_loop != 0)
4395 {
4396 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4397 / PREFETCH_BLOCK);
4398 num_real_prefetches += info[i].prefetch_in_loop;
4399 if (info[i].write)
4400 num_real_write_prefetches += info[i].prefetch_in_loop;
4401 }
4402 }
4403
4404 /* Determine how many iterations ahead to prefetch within the loop, based
4405 on how many prefetches we currently expect to do within the loop. */
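/* Editorial example (not in the original sources): with
   SIMULTANEOUS_PREFETCHES == 3 and four real prefetches per
   iteration, AHEAD below would be 0, so the in-loop prefetches are
   dropped and only the before-loop prefetches remain. */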
4406 if (num_real_prefetches != 0)
4407 {
4408 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4409 {
4410 if (loop_dump_stream)
4411 fprintf (loop_dump_stream,
4412 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4413 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4414 num_real_prefetches = 0, num_real_write_prefetches = 0;
4415 }
4416 }
4417 /* We'll also use AHEAD to determine how many prefetch instructions to
4418 emit before a loop, so don't leave it zero. */
4419 if (ahead == 0)
4420 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4421
4422 for (i = 0; i < num_prefetches; i++)
4423 {
4424 /* Update if we've decided not to prefetch anything within the loop. */
4425 if (num_real_prefetches == 0)
4426 info[i].prefetch_in_loop = 0;
4427
4428 /* Find how many prefetch instructions we'll use before the loop. */
4429 if (info[i].prefetch_before_loop != 0)
4430 {
4431 int n = info[i].total_bytes / PREFETCH_BLOCK;
4432 if (n > ahead)
4433 n = ahead;
4434 info[i].prefetch_before_loop = n;
4435 num_prefetches_before += n;
4436 if (info[i].write)
4437 num_write_prefetches_before += n;
4438 }
4439
4440 if (loop_dump_stream)
4441 {
4442 if (info[i].prefetch_in_loop == 0
4443 && info[i].prefetch_before_loop == 0)
4444 continue;
4445 fprintf (loop_dump_stream, "Prefetch insn: %d",
4446 INSN_UID (info[i].giv->insn));
4447 fprintf (loop_dump_stream,
4448 "; in loop: %d; before: %d; %s\n",
4449 info[i].prefetch_in_loop,
4450 info[i].prefetch_before_loop,
4451 info[i].write ? "read/write" : "read only");
4452 fprintf (loop_dump_stream,
4453 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4454 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4455 info[i].bytes_accessed, info[i].total_bytes);
4456 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4457 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4458 info[i].index, info[i].stride);
4459 print_rtl (loop_dump_stream, info[i].base_address);
4460 fprintf (loop_dump_stream, "\n");
4461 }
4462 }
4463
4464 if (num_real_prefetches + num_prefetches_before > 0)
4465 {
4466 /* Record that this loop uses prefetch instructions. */
4467 LOOP_INFO (loop)->has_prefetch = 1;
4468
4469 if (loop_dump_stream)
4470 {
4471 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4472 num_real_prefetches, num_real_write_prefetches);
4473 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4474 num_prefetches_before, num_write_prefetches_before);
4475 }
4476 }
4477
4478 for (i = 0; i < num_prefetches; i++)
4479 {
4480 int y;
4481
4482 for (y = 0; y < info[i].prefetch_in_loop; y++)
4483 {
4484 rtx loc = copy_rtx (*info[i].giv->location);
4485 rtx insn;
4486 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4487 rtx before_insn = info[i].giv->insn;
4488 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4489 rtx seq;
4490
4491 /* We can save some effort by offsetting the address on
4492 architectures with offsettable memory references. */
4493 if (offsettable_address_p (0, VOIDmode, loc))
4494 loc = plus_constant (loc, bytes_ahead);
4495 else
4496 {
4497 rtx reg = gen_reg_rtx (Pmode);
4498 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4499 GEN_INT (bytes_ahead), reg,
4500 0, before_insn);
4501 loc = reg;
4502 }
4503
4504 start_sequence ();
4505 /* Make sure the address operand is valid for prefetch. */
4506 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4507 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4508 loc = force_reg (Pmode, loc);
4509 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4510 GEN_INT (3)));
4511 seq = get_insns ();
4512 end_sequence ();
4513 emit_insn_before (seq, before_insn);
4514
4515 /* Check all insns emitted and record the new GIV
4516 information. */
4517 insn = NEXT_INSN (prev_insn);
4518 while (insn != before_insn)
4519 {
4520 insn = check_insn_for_givs (loop, insn,
4521 info[i].giv->always_executed,
4522 info[i].giv->maybe_multiple);
4523 insn = NEXT_INSN (insn);
4524 }
4525 }
4526
4527 if (PREFETCH_BEFORE_LOOP)
4528 {
4529 /* Emit insns before the loop to fetch the first cache lines or,
4530 if we're not prefetching within the loop, everything we expect
4531 to need. */
4532 for (y = 0; y < info[i].prefetch_before_loop; y++)
4533 {
4534 rtx reg = gen_reg_rtx (Pmode);
4535 rtx loop_start = loop->start;
4536 rtx init_val = info[i].class->initial_value;
4537 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4538 info[i].giv->add_val,
4539 GEN_INT (y * PREFETCH_BLOCK));
4540
4541 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4542 non-constant INIT_VAL to have the same mode as REG, which
4543 in this case we know to be Pmode. */
4544 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4545 {
4546 rtx seq;
4547
4548 start_sequence ();
4549 init_val = convert_to_mode (Pmode, init_val, 0);
4550 seq = get_insns ();
4551 end_sequence ();
4552 loop_insn_emit_before (loop, 0, loop_start, seq);
4553 }
4554 loop_iv_add_mult_emit_before (loop, init_val,
4555 info[i].giv->mult_val,
4556 add_val, reg, 0, loop_start);
4557 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4558 GEN_INT (3)),
4559 loop_start);
4560 }
4561 }
4562 }
4563
4564 return;
4565 }
4566 \f
4567 /* Communication with routines called via `note_stores'. */
4568
4569 static rtx note_insn;
4570
4571 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4572
4573 static rtx addr_placeholder;
4574
4575 /* ??? Unfinished optimizations, and possible future optimizations,
4576 for the strength reduction code. */
4577
4578 /* ??? The interaction of biv elimination, and recognition of 'constant'
4579 bivs, may cause problems. */
4580
4581 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4582 performance problems.
4583
4584 Perhaps don't eliminate things that can be combined with an addressing
4585 mode. Find all givs that have the same biv, mult_val, and add_val;
4586 then for each giv, check to see if its only use dies in a following
4587 memory address. If so, generate a new memory address and check to see
4588 if it is valid. If it is valid, then store the modified memory address,
4589 otherwise, mark the giv as not done so that it will get its own iv. */
4590
4591 /* ??? Could try to optimize branches when it is known that a biv is always
4592 positive. */
4593
4594 /* ??? When replacing a biv in a compare insn, we should replace it with the
4595 closest giv so that an optimized branch can still be recognized by the
4596 combiner, e.g. the VAX acb insn. */
4597
4598 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4599 was rerun in loop_optimize whenever a register was added or moved.
4600 Also, some of the optimizations could be a little less conservative. */
4601 \f
4602 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4603 is a backward branch in that range that branches to somewhere between
4604 LOOP->START and INSN. Returns 0 otherwise. */
4605
4606 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4607 In practice, this is not a problem, because this function is seldom called
4608 and uses a negligible amount of CPU time on average. */
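
/* Editorial example (not in the original sources): given
   LOOP_START ... L1: ... INSN ... (jump L1) ... LOOP_END,
   where the jump to L1 is not the loop's own closing branch, the
   function returns 1, because that jump branches backward to a point
   between LOOP_START and INSN. */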
4609
4610 static int
4611 back_branch_in_range_p (const struct loop *loop, rtx insn)
4612 {
4613 rtx p, q, target_insn;
4614 rtx loop_start = loop->start;
4615 rtx loop_end = loop->end;
4616 rtx orig_loop_end = loop->end;
4617
4618 /* Stop before we get to the backward branch at the end of the loop. */
4619 loop_end = prev_nonnote_insn (loop_end);
4620 if (BARRIER_P (loop_end))
4621 loop_end = PREV_INSN (loop_end);
4622
4623 /* In case INSN has been deleted, search forward for the first
4624 non-deleted insn following it. */
4625 while (INSN_DELETED_P (insn))
4626 insn = NEXT_INSN (insn);
4627
4628 /* Check for the case where insn is the last insn in the loop. Deal
4629 with the case where INSN was a deleted loop test insn, in which case
4630 it will now be the NOTE_LOOP_END. */
4631 if (insn == loop_end || insn == orig_loop_end)
4632 return 0;
4633
4634 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4635 {
4636 if (JUMP_P (p))
4637 {
4638 target_insn = JUMP_LABEL (p);
4639
4640 /* Search from loop_start to insn, to see if one of them is
4641 the target_insn. We can't use INSN_LUID comparisons here,
4642 since insn may not have an LUID entry. */
4643 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4644 if (q == target_insn)
4645 return 1;
4646 }
4647 }
4648
4649 return 0;
4650 }
4651
4652 /* Scan the loop body and call FNCALL for each insn. In addition to the
4653 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4654 callback.
4655
4656 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4657 least once for every loop iteration except for the last one.
4658
4659 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4660 loop iteration.
4661 */
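
/* Editorial example (not in the original sources): in a loop body of
   the form

       A;  if (c) goto skip;  B;  skip: C;

   the insns of B are scanned with NOT_EVERY_ITERATION set, since the
   conditional jump may skip them, while insns that follow a
   CODE_LABEL which some later jump inside the loop can branch back
   behind are scanned with MAYBE_MULTIPLE set. */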
4662 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4663 static void
4664 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4665 {
4666 int not_every_iteration = 0;
4667 int maybe_multiple = 0;
4668 int past_loop_latch = 0;
4669 bool exit_test_is_entry = false;
4670 rtx p;
4671
4672 /* If loop_scan_start points to the loop exit test, the loop body
4673 cannot be counted on running on every iteration, and we have to
4674 be wary of subversive use of gotos inside expression
4675 statements. */
4676 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4677 {
4678 exit_test_is_entry = true;
4679 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4680 }
4681
4682 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4683 for (p = next_insn_in_loop (loop, loop->scan_start);
4684 p != NULL_RTX;
4685 p = next_insn_in_loop (loop, p))
4686 {
4687 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4688
4689 /* Past CODE_LABEL, we get to insns that may be executed multiple
4690 times. The only way we can be sure that they can't is if every
4691 jump insn between here and the end of the loop either
4692 returns, exits the loop, is a jump to a location that is still
4693 behind the label, or is a jump to the loop start. */
4694
4695 if (LABEL_P (p))
4696 {
4697 rtx insn = p;
4698
4699 maybe_multiple = 0;
4700
4701 while (1)
4702 {
4703 insn = NEXT_INSN (insn);
4704 if (insn == loop->scan_start)
4705 break;
4706 if (insn == loop->end)
4707 {
4708 if (loop->top != 0)
4709 insn = loop->top;
4710 else
4711 break;
4712 if (insn == loop->scan_start)
4713 break;
4714 }
4715
4716 if (JUMP_P (insn)
4717 && GET_CODE (PATTERN (insn)) != RETURN
4718 && (!any_condjump_p (insn)
4719 || (JUMP_LABEL (insn) != 0
4720 && JUMP_LABEL (insn) != loop->scan_start
4721 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4722 {
4723 maybe_multiple = 1;
4724 break;
4725 }
4726 }
4727 }
4728
4729 /* Past a jump, we get to insns for which we can't count
4730 on whether they will be executed during each iteration. */
4731 /* This code appears twice in strength_reduce. There is also similar
4732 code in scan_loop. */
4733 if (JUMP_P (p)
4734 /* If we enter the loop in the middle, and scan around to the
4735 beginning, don't set not_every_iteration for that.
4736 This can be any kind of jump, since we want to know if insns
4737 will be executed if the loop is executed. */
4738 && (exit_test_is_entry
4739 || !(JUMP_LABEL (p) == loop->top
4740 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4741 && any_uncondjump_p (p))
4742 || (NEXT_INSN (p) == loop->end
4743 && any_condjump_p (p))))))
4744 {
4745 rtx label = 0;
4746
4747 /* If this is a jump outside the loop, then it also doesn't
4748 matter. Check to see if the target of this branch is on the
4749 loop->exits_labels list. */
4750
4751 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4752 if (XEXP (label, 0) == JUMP_LABEL (p))
4753 break;
4754
4755 if (!label)
4756 not_every_iteration = 1;
4757 }
4758
4759 /* Note if we pass a loop latch. If we do, then we cannot clear
4760 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4761 a loop since a jump before the last CODE_LABEL may have started
4762 a new loop iteration.
4763
4764 Note that LOOP_TOP is only set for rotated loops and we need
4765 this check for all loops, so compare against the CODE_LABEL
4766 which immediately follows LOOP_START. */
4767 if (JUMP_P (p)
4768 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4769 past_loop_latch = 1;
4770
4771 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4772 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4773 or not an insn is known to be executed each iteration of the
4774 loop, whether or not any iterations are known to occur.
4775
4776 Therefore, if we have just passed a label and have no more labels
4777 between here and the test insn of the loop, and we have not passed
4778 a jump to the top of the loop, then we know these insns will be
4779 executed each iteration. */
4780
4781 if (not_every_iteration
4782 && !past_loop_latch
4783 && LABEL_P (p)
4784 && no_labels_between_p (p, loop->end))
4785 not_every_iteration = 0;
4786 }
4787 }
4788 \f
4789 static void
4790 loop_bivs_find (struct loop *loop)
4791 {
4792 struct loop_regs *regs = LOOP_REGS (loop);
4793 struct loop_ivs *ivs = LOOP_IVS (loop);
4794 /* Temporary list pointers for traversing ivs->list. */
4795 struct iv_class *bl, **backbl;
4796
4797 ivs->list = 0;
4798
4799 for_each_insn_in_loop (loop, check_insn_for_bivs);
4800
4801 /* Scan ivs->list to remove all regs that proved not to be bivs.
4802 Make a sanity check against regs->n_times_set. */
4803 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4804 {
4805 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4806 /* Above happens if register modified by subreg, etc. */
4807 /* Make sure it is not recognized as a basic induction var: */
4808 || regs->array[bl->regno].n_times_set != bl->biv_count
4809 /* If never incremented, it is invariant that we decided not to
4810 move. So leave it alone. */
4811 || ! bl->incremented)
4812 {
4813 if (loop_dump_stream)
4814 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4815 bl->regno,
4816 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4817 ? "not induction variable"
4818 : (! bl->incremented ? "never incremented"
4819 : "count error")));
4820
4821 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4822 *backbl = bl->next;
4823 }
4824 else
4825 {
4826 backbl = &bl->next;
4827
4828 if (loop_dump_stream)
4829 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4830 }
4831 }
4832 }
4833
4834
4835 /* Determine how BIVs are initialized by looking through the pre-header
4836 extended basic block. */
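/* Editorial example (not in the original sources): a pre-header insn
   such as (set (reg 100) (const_int 0)) is recorded as the biv's
   initializing insn, from which the initial value (const_int 0) is
   later taken; an NE test against a constant that branches around
   the loop can supply the initial value as well. */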
4837 static void
4838 loop_bivs_init_find (struct loop *loop)
4839 {
4840 struct loop_ivs *ivs = LOOP_IVS (loop);
4841 /* Temporary list pointers for traversing ivs->list. */
4842 struct iv_class *bl;
4843 int call_seen;
4844 rtx p;
4845
4846 /* Find initial value for each biv by searching backwards from loop_start,
4847 halting at first label. Also record any test condition. */
4848
4849 call_seen = 0;
4850 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4851 {
4852 rtx test;
4853
4854 note_insn = p;
4855
4856 if (CALL_P (p))
4857 call_seen = 1;
4858
4859 if (INSN_P (p))
4860 note_stores (PATTERN (p), record_initial, ivs);
4861
4862 /* Record any test of a biv that branches around the loop if there is no
4863 store between it and the start of the loop. We only care about tests
4864 with constants and registers, and only certain of those. */
4865 if (JUMP_P (p)
4866 && JUMP_LABEL (p) != 0
4867 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4868 && (test = get_condition_for_loop (loop, p)) != 0
4869 && REG_P (XEXP (test, 0))
4870 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4871 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4872 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4873 && bl->init_insn == 0)
4874 {
4875 /* If an NE test, we have an initial value! */
4876 if (GET_CODE (test) == NE)
4877 {
4878 bl->init_insn = p;
4879 bl->init_set = gen_rtx_SET (VOIDmode,
4880 XEXP (test, 0), XEXP (test, 1));
4881 }
4882 else
4883 bl->initial_test = test;
4884 }
4885 }
4886 }
4887
4888
4889 /* Look at each biv and see if we can say anything better about its
4890 initial value from any initializing insns set up above. (This is done
4891 in two passes to avoid missing SETs in a PARALLEL.) */
4892 static void
4893 loop_bivs_check (struct loop *loop)
4894 {
4895 struct loop_ivs *ivs = LOOP_IVS (loop);
4896 /* Temporary list pointers for traversing ivs->list. */
4897 struct iv_class *bl;
4898 struct iv_class **backbl;
4899
4900 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4901 {
4902 rtx src;
4903 rtx note;
4904
4905 if (! bl->init_insn)
4906 continue;
4907
4908 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4909 is a constant, use that value. */
4910 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4911 && CONSTANT_P (XEXP (note, 0)))
4912 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4913 && CONSTANT_P (XEXP (note, 0))))
4914 src = XEXP (note, 0);
4915 else
4916 src = SET_SRC (bl->init_set);
4917
4918 if (loop_dump_stream)
4919 fprintf (loop_dump_stream,
4920 "Biv %d: initialized at insn %d: initial value ",
4921 bl->regno, INSN_UID (bl->init_insn));
4922
4923 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4924 || GET_MODE (src) == VOIDmode)
4925 && valid_initial_value_p (src, bl->init_insn,
4926 LOOP_INFO (loop)->pre_header_has_call,
4927 loop->start))
4928 {
4929 bl->initial_value = src;
4930
4931 if (loop_dump_stream)
4932 {
4933 print_simple_rtl (loop_dump_stream, src);
4934 fputc ('\n', loop_dump_stream);
4935 }
4936 }
4937 /* If we can't use SRC as the initial value,
4938 let the biv keep its own register ("itself") as its initial value. */
4939 else if (loop_dump_stream)
4940 fprintf (loop_dump_stream, "is complex\n");
4941 }
4942 }
4943
4944
4945 /* Search the loop for general induction variables. */
4946
4947 static void
4948 loop_givs_find (struct loop* loop)
4949 {
4950 for_each_insn_in_loop (loop, check_insn_for_givs);
4951 }
4952
4953
4954 /* For each giv for which we still don't know whether or not it is
4955 replaceable, check to see if it is replaceable because its final value
4956 can be calculated. */
4957
4958 static void
4959 loop_givs_check (struct loop *loop)
4960 {
4961 struct loop_ivs *ivs = LOOP_IVS (loop);
4962 struct iv_class *bl;
4963
4964 for (bl = ivs->list; bl; bl = bl->next)
4965 {
4966 struct induction *v;
4967
4968 for (v = bl->giv; v; v = v->next_iv)
4969 if (! v->replaceable && ! v->not_replaceable)
4970 check_final_value (loop, v);
4971 }
4972 }
4973
4974 /* Try to generate the simplest rtx for the expression
4975 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4976 value of giv's. */
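
/* Editorial example (not in the original sources): with
   mult1 == (reg 100), mult2 == (const_int 4) and add1 == (const_int 8),
   the result is (plus (mult (reg 100) (const_int 4)) (const_int 8));
   if all three operands are constants, the whole expression folds to
   a single CONST_INT. */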
4977
4978 static rtx
4979 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4980 {
4981 rtx temp, mult_res;
4982 rtx result;
4983
4984 /* The modes must all be the same. This should always be true. For now,
4985 check to make sure. */
4986 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4987 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4988 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4989
4990 /* Ensure that if at least one of mult1/mult2 is a constant, then mult2
4991 will be the constant. */
4992 if (GET_CODE (mult1) == CONST_INT)
4993 {
4994 temp = mult2;
4995 mult2 = mult1;
4996 mult1 = temp;
4997 }
4998
4999 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
5000 if (! mult_res)
5001 mult_res = gen_rtx_MULT (mode, mult1, mult2);
5002
5003 /* Again, put the constant second. */
5004 if (GET_CODE (add1) == CONST_INT)
5005 {
5006 temp = add1;
5007 add1 = mult_res;
5008 mult_res = temp;
5009 }
5010
5011 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5012 if (! result)
5013 result = gen_rtx_PLUS (mode, add1, mult_res);
5014
5015 return result;
5016 }
5017
5018 /* Searches the list of induction structs for the biv BL, to try to calculate
5019 the total increment value for one iteration of the loop as a constant.
5020
5021 Returns the increment value as an rtx, simplified as much as possible,
5022 if it can be calculated. Otherwise, returns 0. */
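
/* Editorial example (not in the original sources): a biv that is
   incremented by 4 in one insn and by 8 in another insn of the same
   iteration has a total increment of (const_int 12); if any
   increment is not always executed, may execute more than once per
   iteration, or has a non-unit mult_val, 0 is returned instead. */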
5023
5024 static rtx
5025 biv_total_increment (const struct iv_class *bl)
5026 {
5027 struct induction *v;
5028 rtx result;
5029
5030 /* To compute the increment, we must check every instruction that sets it. Each
5031 instruction must be executed only once each time through the loop.
5032 To verify this, we check that the insn is always executed, and that
5033 there are no backward branches after the insn that branch to before it.
5034 Also, the insn must have a mult_val of one (to make sure it really is
5035 an increment). */
5036
5037 result = const0_rtx;
5038 for (v = bl->biv; v; v = v->next_iv)
5039 {
5040 if (v->always_computable && v->mult_val == const1_rtx
5041 && ! v->maybe_multiple
5042 && SCALAR_INT_MODE_P (v->mode))
5043 {
5044 /* If we have already counted it, skip it. */
5045 if (v->same)
5046 continue;
5047
5048 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5049 }
5050 else
5051 return 0;
5052 }
5053
5054 return result;
5055 }
5056
5057 /* Try to prove that the register is dead after the loop exits. Trace every
5058 loop exit looking for an insn that will always be executed, which sets
5059 the register to some value, and appears before the first use of the register
5060 is found. If successful, then return 1, otherwise return 0. */
5061
5062 /* ?? Could be made more intelligent in the handling of jumps, so that
5063 it can search past if statements and other similar structures. */
5064
5065 static int
5066 reg_dead_after_loop (const struct loop *loop, rtx reg)
5067 {
5068 rtx insn, label;
5069 int jump_count = 0;
5070 int label_count = 0;
5071
5072 /* In addition to checking all exits of this loop, we must also check
5073 all exits of inner nested loops that would exit this loop. We don't
5074 have any way to identify those, so we just give up if there are any
5075 such inner loop exits. */
5076
5077 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5078 label_count++;
5079
5080 if (label_count != loop->exit_count)
5081 return 0;
5082
5083 /* HACK: Must also search the loop fall-through exit; create a label_ref
5084 here which points to loop->end, and append the loop->exit_labels
5085 list to it. */
5086 label = gen_rtx_LABEL_REF (Pmode, loop->end);
5087 LABEL_NEXTREF (label) = loop->exit_labels;
5088
5089 for (; label; label = LABEL_NEXTREF (label))
5090 {
5091 /* Succeed if we find an insn which sets the biv or if we reach the end
5092 of the function. Fail if we find an insn that uses the biv, or if we
5093 come to a conditional jump. */
5094
5095 insn = NEXT_INSN (XEXP (label, 0));
5096 while (insn)
5097 {
5098 if (INSN_P (insn))
5099 {
5100 rtx set, note;
5101
5102 if (reg_referenced_p (reg, PATTERN (insn)))
5103 return 0;
5104
5105 note = find_reg_equal_equiv_note (insn);
5106 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5107 return 0;
5108
5109 set = single_set (insn);
5110 if (set && rtx_equal_p (SET_DEST (set), reg))
5111 break;
5112
5113 if (JUMP_P (insn))
5114 {
5115 if (GET_CODE (PATTERN (insn)) == RETURN)
5116 break;
5117 else if (!any_uncondjump_p (insn)
5118 /* Prevent infinite loop following infinite loops. */
5119 || jump_count++ > 20)
5120 return 0;
5121 else
5122 insn = JUMP_LABEL (insn);
5123 }
5124 }
5125
5126 insn = NEXT_INSN (insn);
5127 }
5128 }
5129
5130 /* Success, the register is dead on all loop exits. */
5131 return 1;
5132 }
5133
5134 /* Try to calculate the final value of the biv, the value it will have at
5135 the end of the loop. If we can do it, return that value. */
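
/* Editorial example (not in the original sources): for a biv with
   initial value 0, a constant total increment of 4 per iteration and
   a known iteration count N, the final value is 0 + 4 * N; the insns
   computing it are emitted after the loop end (see
   loop_iv_add_mult_sink below). */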
5136
5137 static rtx
5138 final_biv_value (const struct loop *loop, struct iv_class *bl)
5139 {
5140 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5141 rtx increment, tem;
5142
5143 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5144
5145 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5146 return 0;
5147
5148 /* The final value for reversed bivs must be calculated differently than
5149 for ordinary bivs. In this case, there is already an insn after the
5150 loop which sets this biv's final value (if necessary), and there are
5151 no other loop exits, so we can return any value. */
5152 if (bl->reversed)
5153 {
5154 if (loop_dump_stream)
5155 fprintf (loop_dump_stream,
5156 "Final biv value for %d, reversed biv.\n", bl->regno);
5157
5158 return const0_rtx;
5159 }
5160
5161 /* Try to calculate the final value as initial value + (number of iterations
5162 * increment). For this to work, increment must be invariant, the only
5163 exit from the loop must be the fall through at the bottom (otherwise
5164 it may not have its final value when the loop exits), and the initial
5165 value of the biv must be invariant. */
5166
5167 if (n_iterations != 0
5168 && ! loop->exit_count
5169 && loop_invariant_p (loop, bl->initial_value))
5170 {
5171 increment = biv_total_increment (bl);
5172
5173 if (increment && loop_invariant_p (loop, increment))
5174 {
5175 /* Can calculate the loop exit value, emit insns after loop
5176 end to calculate this value into a temporary register in
5177 case it is needed later. */
5178
5179 tem = gen_reg_rtx (bl->biv->mode);
5180 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5181 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5182 bl->initial_value, tem);
5183
5184 if (loop_dump_stream)
5185 fprintf (loop_dump_stream,
5186 "Final biv value for %d, calculated.\n", bl->regno);
5187
5188 return tem;
5189 }
5190 }
5191
5192 /* Check to see if the biv is dead at all loop exits. */
5193 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5194 {
5195 if (loop_dump_stream)
5196 fprintf (loop_dump_stream,
5197 "Final biv value for %d, biv dead after loop exit.\n",
5198 bl->regno);
5199
5200 return const0_rtx;
5201 }
5202
5203 return 0;
5204 }
5205
5206 /* Return nonzero if it is possible to eliminate the biv BL provided
5207 all givs are reduced. This is possible if either the reg is not
5208 used outside the loop, or we can compute what its final value will
5209 be. */
5210
5211 static int
5212 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5213 int threshold, int insn_count)
5214 {
5215 /* For architectures with a decrement_and_branch_until_zero insn,
5216 don't do this if we put a REG_NONNEG note on the endtest for this
5217 biv. */
5218
5219 #ifdef HAVE_decrement_and_branch_until_zero
5220 if (bl->nonneg)
5221 {
5222 if (loop_dump_stream)
5223 fprintf (loop_dump_stream,
5224 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5225 return 0;
5226 }
5227 #endif
5228
5229 /* Check that biv is used outside loop or if it has a final value.
5230 Compare against bl->init_insn rather than loop->start. We aren't
5231 concerned with any uses of the biv between init_insn and
5232 loop->start since these won't be affected by the value of the biv
5233 elsewhere in the function, so long as init_insn doesn't use the
5234 biv itself. */
5235
5236 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5237 && bl->init_insn
5238 && INSN_UID (bl->init_insn) < max_uid_for_loop
5239 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5240 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5241 || (bl->final_value = final_biv_value (loop, bl)))
5242 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5243
5244 if (loop_dump_stream)
5245 {
5246 fprintf (loop_dump_stream,
5247 "Cannot eliminate biv %d.\n",
5248 bl->regno);
5249 fprintf (loop_dump_stream,
5250 "First use: insn %d, last use: insn %d.\n",
5251 REGNO_FIRST_UID (bl->regno),
5252 REGNO_LAST_UID (bl->regno));
5253 }
5254 return 0;
5255 }
5256
5257
5258 /* Reduce each giv of BL that we have decided to reduce. */
5259
5260 static void
5261 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5262 {
5263 struct induction *v;
5264
5265 for (v = bl->giv; v; v = v->next_iv)
5266 {
5267 struct induction *tv;
5268 if (! v->ignore && v->same == 0)
5269 {
5270 int auto_inc_opt = 0;
5271
5272 /* If the code for derived givs immediately below has already
5273 allocated a new_reg, we must keep it. */
5274 if (! v->new_reg)
5275 v->new_reg = gen_reg_rtx (v->mode);
5276
5277 #ifdef AUTO_INC_DEC
5278 /* If the target has auto-increment addressing modes, and
5279 this is an address giv, then try to put the increment
5280 immediately after its use, so that flow can create an
5281 auto-increment addressing mode. */
5282 /* Don't do this for loops entered at the bottom, to avoid
5283 this invalid transformation:
5284 jmp L; -> jmp L;
5285 TOP: TOP:
5286 use giv use giv
5287 L: inc giv
5288 inc biv L:
5289 test biv test giv
5290 cbr TOP cbr TOP
5291 */
5292 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5293 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5294 /* We don't handle reversed biv's because bl->biv->insn
5295 does not have a valid INSN_LUID. */
5296 && ! bl->reversed
5297 && v->always_executed && ! v->maybe_multiple
5298 && INSN_UID (v->insn) < max_uid_for_loop
5299 && !loop->top)
5300 {
5301 /* If other giv's have been combined with this one, then
5302 this will work only if all uses of the other giv's occur
5303 before this giv's insn. This is difficult to check.
5304
5305 We simplify this by looking for the common case where
5306 there is one DEST_REG giv, and this giv's insn is the
5307 last use of the dest_reg of that DEST_REG giv. If the
5308 increment occurs after the address giv, then we can
5309 perform the optimization. (Otherwise, the increment
5310 would have to go before other_giv, and we would not be
5311 able to combine it with the address giv to get an
5312 auto-inc address.) */
5313 if (v->combined_with)
5314 {
5315 struct induction *other_giv = 0;
5316
5317 for (tv = bl->giv; tv; tv = tv->next_iv)
5318 if (tv->same == v)
5319 {
5320 if (other_giv)
5321 break;
5322 else
5323 other_giv = tv;
5324 }
5325 if (! tv && other_giv
5326 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5327 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5328 == INSN_UID (v->insn))
5329 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5330 auto_inc_opt = 1;
5331 }
5332 /* Check for case where increment is before the address
5333 giv. Do this test in "loop order". */
5334 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5335 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5336 || (INSN_LUID (bl->biv->insn)
5337 > INSN_LUID (loop->scan_start))))
5338 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5339 && (INSN_LUID (loop->scan_start)
5340 < INSN_LUID (bl->biv->insn))))
5341 auto_inc_opt = -1;
5342 else
5343 auto_inc_opt = 1;
5344
5345 #ifdef HAVE_cc0
5346 {
5347 rtx prev;
5348
5349 /* We can't put an insn immediately after one setting
5350 cc0, or immediately before one using cc0. */
5351 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5352 || (auto_inc_opt == -1
5353 && (prev = prev_nonnote_insn (v->insn)) != 0
5354 && INSN_P (prev)
5355 && sets_cc0_p (PATTERN (prev))))
5356 auto_inc_opt = 0;
5357 }
5358 #endif
5359
5360 if (auto_inc_opt)
5361 v->auto_inc_opt = 1;
5362 }
5363 #endif
5364
5365 /* For each place where the biv is incremented, add an insn
5366 to increment the new, reduced reg for the giv. */
5367 for (tv = bl->biv; tv; tv = tv->next_iv)
5368 {
5369 rtx insert_before;
5370
5371 /* Skip if location is the same as a previous one. */
5372 if (tv->same)
5373 continue;
5374 if (! auto_inc_opt)
5375 insert_before = NEXT_INSN (tv->insn);
5376 else if (auto_inc_opt == 1)
5377 insert_before = NEXT_INSN (v->insn);
5378 else
5379 insert_before = v->insn;
5380
5381 if (tv->mult_val == const1_rtx)
5382 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5383 v->new_reg, v->new_reg,
5384 0, insert_before);
5385 else /* tv->mult_val == const0_rtx */
5386 /* A multiply is acceptable here
5387 since this is presumed to be seldom executed. */
5388 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5389 v->add_val, v->new_reg,
5390 0, insert_before);
5391 }
5392
5393 /* Add code at loop start to initialize giv's reduced reg. */
5394
5395 loop_iv_add_mult_hoist (loop,
5396 extend_value_for_giv (v, bl->initial_value),
5397 v->mult_val, v->add_val, v->new_reg);
5398 }
5399 }
5400 }
5401
5402
5403 /* Check for givs whose first use is their definition and whose
5404 last use is the definition of another giv. If so, it is likely
5405 dead and should not be used to derive another giv nor to
5406 eliminate a biv. */
5407
5408 static void
5409 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5410 {
5411 struct induction *v;
5412
5413 for (v = bl->giv; v; v = v->next_iv)
5414 {
5415 if (v->ignore
5416 || (v->same && v->same->ignore))
5417 continue;
5418
5419 if (v->giv_type == DEST_REG
5420 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5421 {
5422 struct induction *v1;
5423
5424 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5425 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5426 v->maybe_dead = 1;
5427 }
5428 }
5429 }
5430
5431
5432 static void
5433 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5434 {
5435 struct induction *v;
5436
5437 for (v = bl->giv; v; v = v->next_iv)
5438 {
5439 if (v->same && v->same->ignore)
5440 v->ignore = 1;
5441
5442 if (v->ignore)
5443 continue;
5444
5445 /* Update expression if this was combined, in case other giv was
5446 replaced. */
5447 if (v->same)
5448 v->new_reg = replace_rtx (v->new_reg,
5449 v->same->dest_reg, v->same->new_reg);
5450
5451 /* See if this register is known to be a pointer to something. If
5452 so, see if we can find the alignment. First see if there is a
5453 destination register that is a pointer. If so, this shares the
5454 alignment too. Next see if we can deduce anything from the
5455 computational information. If not, and this is a DEST_ADDR
5456 giv, at least we know that it's a pointer, though we don't know
5457 the alignment. */
5458 if (REG_P (v->new_reg)
5459 && v->giv_type == DEST_REG
5460 && REG_POINTER (v->dest_reg))
5461 mark_reg_pointer (v->new_reg,
5462 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5463 else if (REG_P (v->new_reg)
5464 && REG_POINTER (v->src_reg))
5465 {
5466 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5467
5468 if (align == 0
5469 || GET_CODE (v->add_val) != CONST_INT
5470 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5471 align = 0;
5472
5473 mark_reg_pointer (v->new_reg, align);
5474 }
5475 else if (REG_P (v->new_reg)
5476 && REG_P (v->add_val)
5477 && REG_POINTER (v->add_val))
5478 {
5479 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5480
5481 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5482 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5483 align = 0;
5484
5485 mark_reg_pointer (v->new_reg, align);
5486 }
5487 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5488 mark_reg_pointer (v->new_reg, 0);
5489
5490 if (v->giv_type == DEST_ADDR)
5491 {
5492 /* Store reduced reg as the address in the memref where we found
5493 this giv. */
5494 if (validate_change_maybe_volatile (v->insn, v->location,
5495 v->new_reg))
5496 /* Yay, it worked! */;
5497 /* Not replaceable; emit an insn to set the original
5498 giv reg from the reduced giv. */
5499 else if (REG_P (*v->location))
5500 {
5501 rtx tem;
5502 start_sequence ();
5503 tem = force_operand (v->new_reg, *v->location);
5504 if (tem != *v->location)
5505 emit_move_insn (*v->location, tem);
5506 tem = get_insns ();
5507 end_sequence ();
5508 loop_insn_emit_before (loop, 0, v->insn, tem);
5509 }
5510 else if (GET_CODE (*v->location) == PLUS
5511 && REG_P (XEXP (*v->location, 0))
5512 && CONSTANT_P (XEXP (*v->location, 1)))
5513 {
5514 rtx tem;
5515 start_sequence ();
5516 tem = expand_simple_binop (GET_MODE (*v->location), MINUS,
5517 v->new_reg, XEXP (*v->location, 1),
5518 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5519 emit_move_insn (XEXP (*v->location, 0), tem);
5520 tem = get_insns ();
5521 end_sequence ();
5522 loop_insn_emit_before (loop, 0, v->insn, tem);
5523 }
5524 else
5525 {
5526 /* If it wasn't a reg, create a pseudo and use that. */
5527 rtx reg, seq;
5528 start_sequence ();
5529 reg = force_reg (v->mode, *v->location);
5530 if (validate_change_maybe_volatile (v->insn, v->location, reg))
5531 {
5532 seq = get_insns ();
5533 end_sequence ();
5534 loop_insn_emit_before (loop, 0, v->insn, seq);
5535 }
5536 else
5537 {
5538 end_sequence ();
5539 if (loop_dump_stream)
5540 fprintf (loop_dump_stream,
5541 "unable to reduce iv in insn %d\n",
5542 INSN_UID (v->insn));
5543 bl->all_reduced = 0;
5544 v->ignore = 1;
5545 continue;
5546 }
5547 }
5548 }
5549 else if (v->replaceable)
5550 {
5551 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5552 }
5553 else
5554 {
5555 rtx original_insn = v->insn;
5556 rtx note;
5557
5558 /* Not replaceable; emit an insn to set the original giv reg from
5559 the reduced giv, same as above. */
5560 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5561 gen_move_insn (v->dest_reg,
5562 v->new_reg));
5563
5564 /* The original insn may have a REG_EQUAL note. This note is
5565 now incorrect and may result in invalid substitutions later.
5566 The original insn is dead, but may be part of a libcall
5567 sequence, which doesn't seem worth the bother of handling. */
5568 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5569 if (note)
5570 remove_note (original_insn, note);
5571 }
5572
5573 /* When a loop is reversed, givs which depend on the reversed
5574 biv, and which are live outside the loop, must be set to their
5575 correct final value. This insn is only needed if the giv is
5576 not replaceable. The correct final value is the same as the
5577 value that the giv starts the reversed loop with. */
5578 if (bl->reversed && ! v->replaceable)
5579 loop_iv_add_mult_sink (loop,
5580 extend_value_for_giv (v, bl->initial_value),
5581 v->mult_val, v->add_val, v->dest_reg);
5582 else if (v->final_value)
5583 loop_insn_sink_or_swim (loop,
5584 gen_load_of_final_value (v->dest_reg,
5585 v->final_value));
5586
5587 if (loop_dump_stream)
5588 {
5589 fprintf (loop_dump_stream, "giv at %d reduced to ",
5590 INSN_UID (v->insn));
5591 print_simple_rtl (loop_dump_stream, v->new_reg);
5592 fprintf (loop_dump_stream, "\n");
5593 }
5594 }
5595 }
5596
5597
5598 static int
5599 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5600 struct iv_class *bl, struct induction *v,
5601 rtx test_reg)
5602 {
5603 int add_cost;
5604 int benefit;
5605
5606 benefit = v->benefit;
5607 PUT_MODE (test_reg, v->mode);
5608 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5609 test_reg, test_reg);
5610
5611 /* Reduce benefit if not replaceable, since we will insert a
5612 move-insn to replace the insn that calculates this giv. Don't do
5613 this unless the giv is a user variable, since it will often be
5614 marked non-replaceable because of the duplication of the exit
5615 code outside the loop. In such a case, the copies we insert are
5616 dead and will be deleted. So they don't have a cost. Similar
5617 situations exist. */
5618 /* ??? The new final_[bg]iv_value code does a much better job of
5619 finding replaceable giv's, and hence this code may no longer be
5620 necessary. */
5621 if (! v->replaceable && ! bl->eliminable
5622 && REG_USERVAR_P (v->dest_reg))
5623 benefit -= copy_cost;
5624
5625 /* Decrease the benefit to count the add-insns that we will insert
5626 to increment the reduced reg for the giv. ??? This can
5627 overestimate the run-time cost of the additional insns, e.g. if
5628 there are multiple basic blocks that increment the biv, but only
5629 one of these blocks is executed during each iteration. There is
5630 no good way to detect cases like this with the current structure
5631 of the loop optimizer. This code is more accurate for
5632 determining code size than run-time benefits. */
5633 benefit -= add_cost * bl->biv_count;
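  /* Illustrative numbers (not from the source): with add_cost 1 and a biv
     incremented in 3 places, the benefit drops by 3, reflecting the three
     add insns emitted for the reduced giv.  */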
5634
5635 /* Decide whether to strength-reduce this giv or to leave the code
5636 unchanged (recompute it from the biv each time it is used). This
5637 decision can be made independently for each giv. */
5638
5639 #ifdef AUTO_INC_DEC
5640 /* Attempt to guess whether autoincrement will handle some of the
5641 new add insns; if so, increase BENEFIT (undo the subtraction of
5642 add_cost that was done above). */
5643 if (v->giv_type == DEST_ADDR
5644 /* Increasing the benefit is risky, since this is only a guess.
5645 Avoid increasing register pressure in cases where there would
5646 be no other benefit from reducing this giv. */
5647 && benefit > 0
5648 && GET_CODE (v->mult_val) == CONST_INT)
5649 {
5650 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5651
5652 if (HAVE_POST_INCREMENT
5653 && INTVAL (v->mult_val) == size)
5654 benefit += add_cost * bl->biv_count;
5655 else if (HAVE_PRE_INCREMENT
5656 && INTVAL (v->mult_val) == size)
5657 benefit += add_cost * bl->biv_count;
5658 else if (HAVE_POST_DECREMENT
5659 && -INTVAL (v->mult_val) == size)
5660 benefit += add_cost * bl->biv_count;
5661 else if (HAVE_PRE_DECREMENT
5662 && -INTVAL (v->mult_val) == size)
5663 benefit += add_cost * bl->biv_count;
5664 }
5665 #endif
5666
5667 return benefit;
5668 }
5669
5670
5671 /* Free IV structures for LOOP. */
5672
5673 static void
5674 loop_ivs_free (struct loop *loop)
5675 {
5676 struct loop_ivs *ivs = LOOP_IVS (loop);
5677 struct iv_class *iv = ivs->list;
5678
5679 free (ivs->regs);
5680
5681 while (iv)
5682 {
5683 struct iv_class *next = iv->next;
5684 struct induction *induction;
5685 struct induction *next_induction;
5686
5687 for (induction = iv->biv; induction; induction = next_induction)
5688 {
5689 next_induction = induction->next_iv;
5690 free (induction);
5691 }
5692 for (induction = iv->giv; induction; induction = next_induction)
5693 {
5694 next_induction = induction->next_iv;
5695 free (induction);
5696 }
5697
5698 free (iv);
5699 iv = next;
5700 }
5701 }
5702
5703 /* Look back before LOOP->START for the insn that sets REG and return
5704 the equivalent constant if there is a REG_EQUAL note, otherwise just
5705 the SET_SRC of REG. */
5706
5707 static rtx
5708 loop_find_equiv_value (const struct loop *loop, rtx reg)
5709 {
5710 rtx loop_start = loop->start;
5711 rtx insn, set;
5712 rtx ret;
5713
5714 ret = reg;
5715 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5716 {
5717 if (LABEL_P (insn))
5718 break;
5719
5720 else if (INSN_P (insn) && reg_set_p (reg, insn))
5721 {
5722 /* We found the last insn before the loop that sets the register.
5723 If it sets the entire register, and has a REG_EQUAL note,
5724 then use the value of the REG_EQUAL note. */
5725 if ((set = single_set (insn))
5726 && (SET_DEST (set) == reg))
5727 {
5728 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5729
5730 /* Only use the REG_EQUAL note if it is a constant.
5731 Other things, divide in particular, will cause
5732 problems later if we use them. */
5733 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5734 && CONSTANT_P (XEXP (note, 0)))
5735 ret = XEXP (note, 0);
5736 else
5737 ret = SET_SRC (set);
5738
5739 /* We cannot do this if it changes between the
5740 assignment and loop start though. */
5741 if (modified_between_p (ret, insn, loop_start))
5742 ret = reg;
5743 }
5744 break;
5745 }
5746 }
5747 return ret;
5748 }
5749
5750 /* Find and return register term common to both expressions OP0 and
5751 OP1 or NULL_RTX if no such term exists. Each expression must be a
5752 REG or a PLUS of a REG. */
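/* For example (illustrative, not from the original comment): given
   (plus (reg 100) (const_int 4)) and (reg 100), the common term is
   (reg 100); given (reg 100) and (reg 101) there is no common term and
   NULL_RTX is returned.  */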
5753
5754 static rtx
5755 find_common_reg_term (rtx op0, rtx op1)
5756 {
5757 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5758 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5759 {
5760 rtx op00;
5761 rtx op01;
5762 rtx op10;
5763 rtx op11;
5764
5765 if (GET_CODE (op0) == PLUS)
5766 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5767 else
5768 op01 = const0_rtx, op00 = op0;
5769
5770 if (GET_CODE (op1) == PLUS)
5771 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5772 else
5773 op11 = const0_rtx, op10 = op1;
5774
5775 /* Find and return common register term if present. */
5776 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5777 return op00;
5778 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5779 return op01;
5780 }
5781
5782 /* No common register term found. */
5783 return NULL_RTX;
5784 }
5785
5786 /* Determine the loop iterator and calculate the number of loop
5787 iterations. Returns the exact number of loop iterations if it can
5788 be calculated, otherwise returns zero. */
5789
5790 static unsigned HOST_WIDE_INT
5791 loop_iterations (struct loop *loop)
5792 {
5793 struct loop_info *loop_info = LOOP_INFO (loop);
5794 struct loop_ivs *ivs = LOOP_IVS (loop);
5795 rtx comparison, comparison_value;
5796 rtx iteration_var, initial_value, increment, final_value;
5797 enum rtx_code comparison_code;
5798 HOST_WIDE_INT inc;
5799 unsigned HOST_WIDE_INT abs_inc;
5800 unsigned HOST_WIDE_INT abs_diff;
5801 int off_by_one;
5802 int increment_dir;
5803 int unsigned_p, compare_dir, final_larger;
5804 rtx last_loop_insn;
5805 struct iv_class *bl;
5806
5807 loop_info->n_iterations = 0;
5808 loop_info->initial_value = 0;
5809 loop_info->initial_equiv_value = 0;
5810 loop_info->comparison_value = 0;
5811 loop_info->final_value = 0;
5812 loop_info->final_equiv_value = 0;
5813 loop_info->increment = 0;
5814 loop_info->iteration_var = 0;
5815 loop_info->iv = 0;
5816
5817 /* We used to use prev_nonnote_insn here, but that fails because it might
5818 accidentally get the branch for a contained loop if the branch for this
5819 loop was deleted. We can only trust branches immediately before the
5820 loop_end. */
5821 last_loop_insn = PREV_INSN (loop->end);
5822
5823 /* ??? We should probably try harder to find the jump insn
5824 at the end of the loop. The following code assumes that
5825 the last loop insn is a jump to the top of the loop. */
5826 if (!JUMP_P (last_loop_insn))
5827 {
5828 if (loop_dump_stream)
5829 fprintf (loop_dump_stream,
5830 "Loop iterations: No final conditional branch found.\n");
5831 return 0;
5832 }
5833
5834 /* If there is more than a single jump to the top of the loop
5835 we cannot (easily) determine the iteration count. */
5836 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5837 {
5838 if (loop_dump_stream)
5839 fprintf (loop_dump_stream,
5840 "Loop iterations: Loop has multiple back edges.\n");
5841 return 0;
5842 }
5843
5844 /* Find the iteration variable. If the last insn is a conditional
5845 branch, and the insn before tests a register value, make that the
5846 iteration variable. */
5847
5848 comparison = get_condition_for_loop (loop, last_loop_insn);
5849 if (comparison == 0)
5850 {
5851 if (loop_dump_stream)
5852 fprintf (loop_dump_stream,
5853 "Loop iterations: No final comparison found.\n");
5854 return 0;
5855 }
5856
5857 /* ??? Get_condition may switch position of induction variable and
5858 invariant register when it canonicalizes the comparison. */
5859
5860 comparison_code = GET_CODE (comparison);
5861 iteration_var = XEXP (comparison, 0);
5862 comparison_value = XEXP (comparison, 1);
5863
5864 if (!REG_P (iteration_var))
5865 {
5866 if (loop_dump_stream)
5867 fprintf (loop_dump_stream,
5868 "Loop iterations: Comparison not against register.\n");
5869 return 0;
5870 }
5871
5872 /* The only new registers that are created before loop iterations
5873 are givs made from biv increments or registers created by
5874 load_mems. In the latter case, it is possible that try_copy_prop
5875 will propagate a new pseudo into the old iteration register but
5876 this will be marked by having the REG_USERVAR_P bit set. */
5877
5878 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5879 || REG_USERVAR_P (iteration_var));
5880
5881 /* Determine the initial value of the iteration variable, and the amount
5882 that it is incremented each loop. Use the tables constructed by
5883 the strength reduction pass to calculate these values. */
5884
5885 /* Clear the result values, in case no answer can be found. */
5886 initial_value = 0;
5887 increment = 0;
5888
5889 /* The iteration variable can be either a giv or a biv. Check to see
5890 which it is, and compute the variable's initial value, and increment
5891 value if possible. */
5892
5893 /* If this is a new register, we can't handle it since we don't have any
5894 reg_iv_type entry for it. */
5895 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5896 {
5897 if (loop_dump_stream)
5898 fprintf (loop_dump_stream,
5899 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5900 return 0;
5901 }
5902
5903 /* Reject iteration variables larger than the host wide int size, since they
5904 could result in a number of iterations greater than the range of our
5905 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5906 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5907 > HOST_BITS_PER_WIDE_INT))
5908 {
5909 if (loop_dump_stream)
5910 fprintf (loop_dump_stream,
5911 "Loop iterations: Iteration var rejected because mode too large.\n");
5912 return 0;
5913 }
5914 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5915 {
5916 if (loop_dump_stream)
5917 fprintf (loop_dump_stream,
5918 "Loop iterations: Iteration var not an integer.\n");
5919 return 0;
5920 }
5921
5922 /* Try swapping the comparison to identify a suitable iv. */
5923 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5924 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5925 && REG_P (comparison_value)
5926 && REGNO (comparison_value) < ivs->n_regs)
5927 {
5928 rtx temp = comparison_value;
5929 comparison_code = swap_condition (comparison_code);
5930 comparison_value = iteration_var;
5931 iteration_var = temp;
5932 }
5933
5934 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5935 {
5936 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5937
5938 /* Grab initial value, only useful if it is a constant. */
5939 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5940 initial_value = bl->initial_value;
5941 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5942 {
5943 if (loop_dump_stream)
5944 fprintf (loop_dump_stream,
5945 "Loop iterations: Basic induction var not set once in each iteration.\n");
5946 return 0;
5947 }
5948
5949 increment = biv_total_increment (bl);
5950 }
5951 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5952 {
5953 HOST_WIDE_INT offset = 0;
5954 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5955 rtx biv_initial_value;
5956
5957 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5958
5959 if (!v->always_executed || v->maybe_multiple)
5960 {
5961 if (loop_dump_stream)
5962 fprintf (loop_dump_stream,
5963 "Loop iterations: General induction var not set once in each iteration.\n");
5964 return 0;
5965 }
5966
5967 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5968
5969 /* Increment value is mult_val times the increment value of the biv. */
5970
5971 increment = biv_total_increment (bl);
5972 if (increment)
5973 {
5974 struct induction *biv_inc;
5975
5976 increment = fold_rtx_mult_add (v->mult_val,
5977 extend_value_for_giv (v, increment),
5978 const0_rtx, v->mode);
5979 /* The caller assumes that one full increment has occurred at the
5980 first loop test. But that's not true when the biv is incremented
5981 after the giv is set (which is the usual case), e.g.:
5982 i = 6; do {;} while (i++ < 9).
5983 Therefore, we bias the initial value by subtracting the amount of
5984 the increment that occurs between the giv set and the giv test. */
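	     /* Illustrative sketch (not part of the original comment): in the
		example above the single +1 biv increment follows the giv set,
		so offset becomes -1 and the biv's initial value 6 is biased
		to 5 before it is scaled by mult_val.  */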
5985 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5986 {
5987 if (loop_insn_first_p (v->insn, biv_inc->insn))
5988 {
5989 if (REG_P (biv_inc->add_val))
5990 {
5991 if (loop_dump_stream)
5992 fprintf (loop_dump_stream,
5993 "Loop iterations: Basic induction var add_val is REG %d.\n",
5994 REGNO (biv_inc->add_val));
5995 return 0;
5996 }
5997
5998 /* If we have already counted it, skip it. */
5999 if (biv_inc->same)
6000 continue;
6001
6002 offset -= INTVAL (biv_inc->add_val);
6003 }
6004 }
6005 }
6006 if (loop_dump_stream)
6007 fprintf (loop_dump_stream,
6008 "Loop iterations: Giv iterator, initial value bias %ld.\n",
6009 (long) offset);
6010
6011 /* Initial value is mult_val times the biv's initial value plus
6012 add_val. Only useful if it is a constant. */
6013 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
6014 initial_value
6015 = fold_rtx_mult_add (v->mult_val,
6016 plus_constant (biv_initial_value, offset),
6017 v->add_val, v->mode);
6018 }
6019 else
6020 {
6021 if (loop_dump_stream)
6022 fprintf (loop_dump_stream,
6023 "Loop iterations: Not basic or general induction var.\n");
6024 return 0;
6025 }
6026
6027 if (initial_value == 0)
6028 return 0;
6029
6030 unsigned_p = 0;
6031 off_by_one = 0;
6032 switch (comparison_code)
6033 {
6034 case LEU:
6035 unsigned_p = 1;
6036 case LE:
6037 compare_dir = 1;
6038 off_by_one = 1;
6039 break;
6040 case GEU:
6041 unsigned_p = 1;
6042 case GE:
6043 compare_dir = -1;
6044 off_by_one = -1;
6045 break;
6046 case EQ:
6047 /* Cannot determine loop iterations with this case. */
6048 compare_dir = 0;
6049 break;
6050 case LTU:
6051 unsigned_p = 1;
6052 case LT:
6053 compare_dir = 1;
6054 break;
6055 case GTU:
6056 unsigned_p = 1;
6057 case GT:
6058 compare_dir = -1;
6059 break;
6060 case NE:
6061 compare_dir = 0;
6062 break;
6063 default:
6064 gcc_unreachable ();
6065 }
6066
6067 /* If the comparison value is an invariant register, then try to find
6068 its value from the insns before the start of the loop. */
6069
6070 final_value = comparison_value;
6071 if (REG_P (comparison_value)
6072 && loop_invariant_p (loop, comparison_value))
6073 {
6074 final_value = loop_find_equiv_value (loop, comparison_value);
6075
6076 /* If we don't get an invariant final value, we are better
6077 off with the original register. */
6078 if (! loop_invariant_p (loop, final_value))
6079 final_value = comparison_value;
6080 }
6081
6082 /* Calculate the approximate final value of the induction variable
6083 (on the last successful iteration). The exact final value
6084 depends on the branch operator, and increment sign. It will be
6085 wrong if the iteration variable is not incremented by one each
6086 time through the loop and (comparison_value + off_by_one -
6087 initial_value) % increment != 0.
6088 ??? Note that the final_value may overflow and thus final_larger
6089 will be bogus. A potentially infinite loop will be classified
6090 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6091 if (off_by_one)
6092 final_value = plus_constant (final_value, off_by_one);
6093
6094 /* Save the calculated values describing this loop's bounds, in case
6095 precondition_loop_p will need them later. These values can not be
6096 recalculated inside precondition_loop_p because strength reduction
6097 optimizations may obscure the loop's structure.
6098
6099 These values are only required by precondition_loop_p and insert_bct
6100 whenever the number of iterations cannot be computed at compile time.
6101 Only the difference between final_value and initial_value is
6102 important. Note that final_value is only approximate. */
6103 loop_info->initial_value = initial_value;
6104 loop_info->comparison_value = comparison_value;
6105 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6106 loop_info->increment = increment;
6107 loop_info->iteration_var = iteration_var;
6108 loop_info->comparison_code = comparison_code;
6109 loop_info->iv = bl;
6110
6111 /* Try to determine the iteration count for loops such
6112 as for (i = init; i < init + const; i++). When running the
6113 loop optimization twice, the first pass often converts simple
6114 loops into this form. */
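  /* Illustrative sketch (not from the original comment): if reg2 was loaded
     from reg1 just before the loop and const2 is (const_int 10), final_value
     can be rewritten below as reg1 + 10, so only the constant difference
     from the initial value remains.  */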
6115
6116 if (REG_P (initial_value))
6117 {
6118 rtx reg1;
6119 rtx reg2;
6120 rtx const2;
6121
6122 reg1 = initial_value;
6123 if (GET_CODE (final_value) == PLUS)
6124 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6125 else
6126 reg2 = final_value, const2 = const0_rtx;
6127
6128 /* Check for initial_value = reg1, final_value = reg2 + const2,
6129 where reg1 != reg2. */
6130 if (REG_P (reg2) && reg2 != reg1)
6131 {
6132 rtx temp;
6133
6134 /* Find what reg1 is equivalent to. Hopefully it will
6135 either be reg2 or reg2 plus a constant. */
6136 temp = loop_find_equiv_value (loop, reg1);
6137
6138 if (find_common_reg_term (temp, reg2))
6139 initial_value = temp;
6140 else if (loop_invariant_p (loop, reg2))
6141 {
6142 /* Find what reg2 is equivalent to. Hopefully it will
6143 either be reg1 or reg1 plus a constant. Let's ignore
6144 the latter case for now since it is not so common. */
6145 temp = loop_find_equiv_value (loop, reg2);
6146
6147 if (temp == loop_info->iteration_var)
6148 temp = initial_value;
6149 if (temp == reg1)
6150 final_value = (const2 == const0_rtx)
6151 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6152 }
6153 }
6154 }
6155
6156 loop_info->initial_equiv_value = initial_value;
6157 loop_info->final_equiv_value = final_value;
6158
6159 /* For EQ comparison loops, we don't have a valid final value.
6160 Check this now so that we won't leave an invalid value if we
6161 return early for any other reason. */
6162 if (comparison_code == EQ)
6163 loop_info->final_equiv_value = loop_info->final_value = 0;
6164
6165 if (increment == 0)
6166 {
6167 if (loop_dump_stream)
6168 fprintf (loop_dump_stream,
6169 "Loop iterations: Increment value can't be calculated.\n");
6170 return 0;
6171 }
6172
6173 if (GET_CODE (increment) != CONST_INT)
6174 {
6175 /* If we have a REG, check to see if REG holds a constant value. */
6176 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6177 clear if it is worthwhile to try to handle such RTL. */
6178 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6179 increment = loop_find_equiv_value (loop, increment);
6180
6181 if (GET_CODE (increment) != CONST_INT)
6182 {
6183 if (loop_dump_stream)
6184 {
6185 fprintf (loop_dump_stream,
6186 "Loop iterations: Increment value not constant ");
6187 print_simple_rtl (loop_dump_stream, increment);
6188 fprintf (loop_dump_stream, ".\n");
6189 }
6190 return 0;
6191 }
6192 loop_info->increment = increment;
6193 }
6194
6195 if (GET_CODE (initial_value) != CONST_INT)
6196 {
6197 if (loop_dump_stream)
6198 {
6199 fprintf (loop_dump_stream,
6200 "Loop iterations: Initial value not constant ");
6201 print_simple_rtl (loop_dump_stream, initial_value);
6202 fprintf (loop_dump_stream, ".\n");
6203 }
6204 return 0;
6205 }
6206 else if (GET_CODE (final_value) != CONST_INT)
6207 {
6208 if (loop_dump_stream)
6209 {
6210 fprintf (loop_dump_stream,
6211 "Loop iterations: Final value not constant ");
6212 print_simple_rtl (loop_dump_stream, final_value);
6213 fprintf (loop_dump_stream, ".\n");
6214 }
6215 return 0;
6216 }
6217 else if (comparison_code == EQ)
6218 {
6219 rtx inc_once;
6220
6221 if (loop_dump_stream)
6222 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6223
6224 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6225 GET_MODE (iteration_var));
6226
6227 if (inc_once == final_value)
6228 {
6229 /* The iterator value once through the loop is equal to the
6230 comparison value. Either we have an infinite loop, or
6231 we'll loop twice. */
6232 if (increment == const0_rtx)
6233 return 0;
6234 loop_info->n_iterations = 2;
6235 }
6236 else
6237 loop_info->n_iterations = 1;
6238
6239 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6240 loop_info->final_value
6241 = gen_int_mode ((INTVAL (loop_info->initial_value)
6242 + loop_info->n_iterations * INTVAL (increment)),
6243 GET_MODE (iteration_var));
6244 else
6245 loop_info->final_value
6246 = plus_constant (loop_info->initial_value,
6247 loop_info->n_iterations * INTVAL (increment));
6248 loop_info->final_equiv_value
6249 = gen_int_mode ((INTVAL (initial_value)
6250 + loop_info->n_iterations * INTVAL (increment)),
6251 GET_MODE (iteration_var));
6252 return loop_info->n_iterations;
6253 }
6254
6255 /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */
6256 if (unsigned_p)
6257 final_larger
6258 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6259 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6260 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6261 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6262 else
6263 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6264 - (INTVAL (final_value) < INTVAL (initial_value));
6265
6266 if (INTVAL (increment) > 0)
6267 increment_dir = 1;
6268 else if (INTVAL (increment) == 0)
6269 increment_dir = 0;
6270 else
6271 increment_dir = -1;
6272
6273 /* There are 27 different cases: compare_dir = -1, 0, 1;
6274 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6275 There are 4 normal cases, 4 reverse cases (where the iteration variable
6276 will overflow before the loop exits), 4 infinite loop cases, and 15
6277 immediate exit (0 or 1 iteration depending on loop type) cases.
6278 Only try to optimize the normal cases. */
6279
6280 /* (compare_dir/final_larger/increment_dir)
6281 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6282 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6283 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6284 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
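  /* Illustrative sketch (not part of the original comment): a loop of the
     form "for (i = 0; i < n; i++)" with a known positive n gives
     compare_dir = 1, final_larger = 1 and increment_dir = 1, i.e. the
     normal case (1/1/1) above.  */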
6285
6286 /* ?? If the meaning of reverse loops (where the iteration variable
6287 will overflow before the loop exits) is undefined, then we could
6288 eliminate all of these special checks, and just always assume
6289 the loops are normal/immediate/infinite. Note that this means
6290 the sign of increment_dir does not have to be known. Also,
6291 since it does not really hurt if immediate exit loops or infinite loops
6292 are optimized, that case could be ignored as well, and hence all
6293 loops can be optimized.
6294
6295 According to ANSI Spec, the reverse loop case result is undefined,
6296 because the action on overflow is undefined.
6297
6298 See also the special test for NE loops below. */
6299
6300 if (final_larger == increment_dir && final_larger != 0
6301 && (final_larger == compare_dir || compare_dir == 0))
6302 /* Normal case. */
6303 ;
6304 else
6305 {
6306 if (loop_dump_stream)
6307 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6308 return 0;
6309 }
6310
6311 /* Calculate the number of iterations; final_value is only an approximation,
6312 so correct for that. Note that abs_diff and n_iterations are
6313 unsigned, because they can be as large as 2^n - 1. */
6314
6315 inc = INTVAL (increment);
6316 gcc_assert (inc);
6317 if (inc > 0)
6318 {
6319 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6320 abs_inc = inc;
6321 }
6322 else
6323 {
6324 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6325 abs_inc = -inc;
6326 }
6327
6328 /* Given that iteration_var is going to iterate over its own mode,
6329 not HOST_WIDE_INT, disregard higher bits that might have come
6330 into the picture due to sign extension of initial and final
6331 values. */
6332 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6333 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6334 << 1) - 1;
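  /* Illustrative sketch (not from the source): for a 32-bit iteration_var
     on a 64-bit HOST_WIDE_INT host the expression above builds the mask
     0xffffffff, so only the low 32 bits of the difference are kept.  */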
6335
6336 /* For NE tests, make sure that the iteration variable won't miss
6337 the final value. If abs_diff mod abs_inc is not zero, then the
6338 iteration variable will overflow before the loop exits, and we
6339 can not calculate the number of iterations. */
6340 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6341 return 0;
6342
6343 /* Note that the number of iterations could be calculated using
6344 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6345 handle potential overflow of the summation. */
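  /* Worked example (illustrative): abs_diff = 10 and abs_inc = 3 gives
     10 / 3 + (10 % 3 != 0) = 3 + 1 = 4 iterations.  */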
6346 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6347 return loop_info->n_iterations;
6348 }
6349
6350 /* Perform strength reduction and induction variable elimination.
6351
6352 Pseudo registers created during this function will be beyond the
6353 last valid index in several tables including
6354 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6355 problem here, because the added registers cannot be givs outside of
6356 their loop, and hence will never be reconsidered. But scan_loop
6357 must check regnos to make sure they are in bounds. */
6358
6359 static void
6360 strength_reduce (struct loop *loop, int flags)
6361 {
6362 struct loop_info *loop_info = LOOP_INFO (loop);
6363 struct loop_regs *regs = LOOP_REGS (loop);
6364 struct loop_ivs *ivs = LOOP_IVS (loop);
6365 rtx p;
6366 /* Temporary list pointer for traversing ivs->list. */
6367 struct iv_class *bl;
6368 /* Ratio of extra register life span we can justify
6369 for saving an instruction. More if loop doesn't call subroutines
6370 since in that case saving an insn makes more difference
6371 and more registers are available. */
6372 /* ??? could set this to last value of threshold in move_movables */
6373 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
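  /* Illustrative numbers (not from the source): with, say, 28 non-fixed
     registers and no calls in the loop, threshold = 2 * (3 + 28) = 62;
     a call in the loop halves it to 31.  */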
6374 /* Map of pseudo-register replacements. */
6375 rtx *reg_map = NULL;
6376 int reg_map_size;
6377 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6378 int insn_count = count_insns_in_loop (loop);
6379
6380 addr_placeholder = gen_reg_rtx (Pmode);
6381
6382 ivs->n_regs = max_reg_before_loop;
6383 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6384
6385 /* Find all BIVs in loop. */
6386 loop_bivs_find (loop);
6387
6388 /* Exit if there are no bivs. */
6389 if (! ivs->list)
6390 {
6391 loop_ivs_free (loop);
6392 return;
6393 }
6394
6395 /* Determine how BIVS are initialized by looking through the pre-header
6396 extended basic block. */
6397 loop_bivs_init_find (loop);
6398
6399 /* Look at each biv and see if we can say anything better about its
6400 initial value from any initializing insns set up above. */
6401 loop_bivs_check (loop);
6402
6403 /* Search the loop for general induction variables. */
6404 loop_givs_find (loop);
6405
6406 /* Try to calculate and save the number of loop iterations. This is
6407 set to zero if the actual number can not be calculated. This must
6408 be called after all giv's have been identified, since otherwise it may
6409 fail if the iteration variable is a giv. */
6410 loop_iterations (loop);
6411
6412 #ifdef HAVE_prefetch
6413 if (flags & LOOP_PREFETCH)
6414 emit_prefetch_instructions (loop);
6415 #endif
6416
6417 /* Now for each giv for which we still don't know whether or not it is
6418 replaceable, check to see if it is replaceable because its final value
6419 can be calculated. This must be done after loop_iterations is called,
6420 so that final_giv_value will work correctly. */
6421 loop_givs_check (loop);
6422
6423 /* Try to prove that the loop counter variable (if any) is always
6424 nonnegative; if so, record that fact with a REG_NONNEG note
6425 so that a "decrement and branch until zero" insn can be used. */
6426 check_dbra_loop (loop, insn_count);
6427
6428 /* Create reg_map to hold substitutions for replaceable giv regs.
6429 Some givs might have been made from biv increments, so look at
6430 ivs->reg_iv_type for a suitable size. */
6431 reg_map_size = ivs->n_regs;
6432 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6433
6434 /* Examine each iv class for feasibility of strength reduction/induction
6435 variable elimination. */
6436
6437 for (bl = ivs->list; bl; bl = bl->next)
6438 {
6439 struct induction *v;
6440 int benefit;
6441
6442 /* Test whether it will be possible to eliminate this biv
6443 provided all givs are reduced. */
6444 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6445
6446 /* This will be true at the end, if all givs which depend on this
6447 biv have been strength reduced.
6448 We can't (currently) eliminate the biv unless this is so. */
6449 bl->all_reduced = 1;
6450
6451 /* Check each extension dependent giv in this class to see if its
6452 root biv is safe from wrapping in the interior mode. */
6453 check_ext_dependent_givs (loop, bl);
6454
6455 /* Combine all giv's for this iv_class. */
6456 combine_givs (regs, bl);
6457
6458 for (v = bl->giv; v; v = v->next_iv)
6459 {
6460 struct induction *tv;
6461
6462 if (v->ignore || v->same)
6463 continue;
6464
6465 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6466
6467 /* If an insn is not to be strength reduced, then set its ignore
6468 flag, and clear bl->all_reduced. */
6469
6470 /* A giv that depends on a reversed biv must be reduced if it is
6471 used after the loop exit, otherwise, it would have the wrong
6472 value after the loop exit. To make it simple, just reduce all
6473 of such giv's whether or not we know they are used after the loop
6474 exit. */
6475
6476 if (v->lifetime * threshold * benefit < insn_count
6477 && ! bl->reversed)
6478 {
6479 if (loop_dump_stream)
6480 fprintf (loop_dump_stream,
6481 "giv of insn %d not worth while, %d vs %d.\n",
6482 INSN_UID (v->insn),
6483 v->lifetime * threshold * benefit, insn_count);
6484 v->ignore = 1;
6485 bl->all_reduced = 0;
6486 }
6487 else
6488 {
6489 /* Check that we can increment the reduced giv without a
6490 multiply insn. If not, reject it. */
6491
6492 for (tv = bl->biv; tv; tv = tv->next_iv)
6493 if (tv->mult_val == const1_rtx
6494 && ! product_cheap_p (tv->add_val, v->mult_val))
6495 {
6496 if (loop_dump_stream)
6497 fprintf (loop_dump_stream,
6498 "giv of insn %d: would need a multiply.\n",
6499 INSN_UID (v->insn));
6500 v->ignore = 1;
6501 bl->all_reduced = 0;
6502 break;
6503 }
6504 }
6505 }
6506
6507 /* Check for givs whose first use is their definition and whose
6508 last use is the definition of another giv. If so, it is likely
6509 dead and should not be used to derive another giv nor to
6510 eliminate a biv. */
6511 loop_givs_dead_check (loop, bl);
6512
6513 /* Reduce each giv that we decided to reduce. */
6514 loop_givs_reduce (loop, bl);
6515
6516 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6517 as not reduced.
6518
6519 For each giv register that can be reduced now: if replaceable,
6520 substitute reduced reg wherever the old giv occurs;
6521 else add new move insn "giv_reg = reduced_reg". */
6522 loop_givs_rescan (loop, bl, reg_map);
6523
6524 /* All the givs based on the biv bl have been reduced if they
6525 merit it. */
6526
6527 /* For each giv not marked as maybe dead that has been combined with a
6528 second giv, clear any "maybe dead" mark on that second giv.
6529 v->new_reg will either be or refer to the register of the giv it
6530 combined with.
6531
6532 Doing this clearing avoids problems in biv elimination where
6533 a giv's new_reg is a complex value that can't be put in the
6534 insn but the giv combined with (with a reg as new_reg) is
6535 marked maybe_dead. Since the register will be used in either
6536 case, we'd prefer it be used from the simpler giv. */
6537
6538 for (v = bl->giv; v; v = v->next_iv)
6539 if (! v->maybe_dead && v->same)
6540 v->same->maybe_dead = 0;
6541
6542 /* Try to eliminate the biv, if it is a candidate.
6543 This won't work if ! bl->all_reduced,
6544 since the givs we planned to use might not have been reduced.
6545
6546 We have to be careful that we didn't initially think we could
6547 eliminate this biv because of a giv that we now think may be
6548 dead and shouldn't be used as a biv replacement.
6549
6550 Also, there is the possibility that we may have a giv that looks
6551 like it can be used to eliminate a biv, but the resulting insn
6552 isn't valid. This can happen, for example, on the 88k, where a
6553 JUMP_INSN can compare a register only with zero. Attempts to
6554 replace it with a compare with a constant will fail.
6555
6556 Note that in cases where this call fails, we may have replaced some
6557 of the occurrences of the biv with a giv, but no harm was done in
6558 doing so in the rare cases where it can occur. */
6559
6560 if (bl->all_reduced == 1 && bl->eliminable
6561 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6562 {
6563 /* ?? If we created a new test to bypass the loop entirely,
6564 or otherwise drop straight in, based on this test, then
6565 we might want to rewrite it also. This way some later
6566 pass has more hope of removing the initialization of this
6567 biv entirely. */
6568
6569 /* If final_value != 0, then the biv may be used after loop end
6570 and we must emit an insn to set it just in case.
6571
6572 Reversed bivs already have an insn after the loop setting their
6573 value, so we don't need another one. We can't calculate the
6574 proper final value for such a biv here anyway. */
6575 if (bl->final_value && ! bl->reversed)
6576 loop_insn_sink_or_swim (loop,
6577 gen_load_of_final_value (bl->biv->dest_reg,
6578 bl->final_value));
6579
6580 if (loop_dump_stream)
6581 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6582 bl->regno);
6583 }
6584 /* See above note wrt final_value. But since we couldn't eliminate
6585 the biv, we must set the value after the loop instead of before. */
6586 else if (bl->final_value && ! bl->reversed)
6587 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6588 bl->final_value));
6589 }
6590
6591 /* Go through all the instructions in the loop, making all the
6592 register substitutions scheduled in REG_MAP. */
6593
6594 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6595 if (INSN_P (p))
6596 {
6597 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6598 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6599 INSN_CODE (p) = -1;
6600 }
6601
6602 if (loop_dump_stream)
6603 fprintf (loop_dump_stream, "\n");
6604
6605 loop_ivs_free (loop);
6606 if (reg_map)
6607 free (reg_map);
6608 }
6609 \f
6610 /* Record all basic induction variables calculated in the insn. */
6611 static rtx
6612 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6613 int maybe_multiple)
6614 {
6615 struct loop_ivs *ivs = LOOP_IVS (loop);
6616 rtx set;
6617 rtx dest_reg;
6618 rtx inc_val;
6619 rtx mult_val;
6620 rtx *location;
6621
6622 if (NONJUMP_INSN_P (p)
6623 && (set = single_set (p))
6624 && REG_P (SET_DEST (set)))
6625 {
6626 dest_reg = SET_DEST (set);
6627 if (REGNO (dest_reg) < max_reg_before_loop
6628 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6629 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6630 {
6631 if (basic_induction_var (loop, SET_SRC (set),
6632 GET_MODE (SET_SRC (set)),
6633 dest_reg, p, &inc_val, &mult_val,
6634 &location))
6635 {
6636 /* It is a possible basic induction variable.
6637 Create and initialize an induction structure for it. */
6638
6639 struct induction *v = xmalloc (sizeof (struct induction));
6640
6641 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6642 not_every_iteration, maybe_multiple);
6643 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6644 }
6645 else if (REGNO (dest_reg) < ivs->n_regs)
6646 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6647 }
6648 }
6649 return p;
6650 }
6651 \f
6652 /* Record all givs calculated in the insn.
6653 A register is a giv if: it is only set once, it is a function of a
6654 biv and a constant (or invariant), and it is not a biv. */
6655 static rtx
6656 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6657 int maybe_multiple)
6658 {
6659 struct loop_regs *regs = LOOP_REGS (loop);
6660
6661 rtx set;
6662 /* Look for a general induction variable in a register. */
6663 if (NONJUMP_INSN_P (p)
6664 && (set = single_set (p))
6665 && REG_P (SET_DEST (set))
6666 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6667 {
6668 rtx src_reg;
6669 rtx dest_reg;
6670 rtx add_val;
6671 rtx mult_val;
6672 rtx ext_val;
6673 int benefit;
6674 rtx regnote = 0;
6675 rtx last_consec_insn;
6676
6677 dest_reg = SET_DEST (set);
6678 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6679 return p;
6680
6681 if (/* SET_SRC is a giv. */
6682 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6683 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6684 /* Equivalent expression is a giv. */
6685 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6686 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6687 &add_val, &mult_val, &ext_val, 0,
6688 &benefit, VOIDmode)))
6689 /* Don't try to handle any regs made by loop optimization.
6690 We have nothing on them in regno_first_uid, etc. */
6691 && REGNO (dest_reg) < max_reg_before_loop
6692 /* Don't recognize a BASIC_INDUCT_VAR here. */
6693 && dest_reg != src_reg
6694 /* This must be the only place where the register is set. */
6695 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6696 /* or all sets must be consecutive and make a giv. */
6697 || (benefit = consec_sets_giv (loop, benefit, p,
6698 src_reg, dest_reg,
6699 &add_val, &mult_val, &ext_val,
6700 &last_consec_insn))))
6701 {
6702 struct induction *v = xmalloc (sizeof (struct induction));
6703
6704 /* If this is a library call, increase benefit. */
6705 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6706 benefit += libcall_benefit (p);
6707
6708 /* Skip the consecutive insns, if there are any. */
6709 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6710 p = last_consec_insn;
6711
6712 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6713 ext_val, benefit, DEST_REG, not_every_iteration,
6714 maybe_multiple, (rtx*) 0);
6715
6716 }
6717 }
6718
6719 /* Look for givs which are memory addresses. */
6720 if (NONJUMP_INSN_P (p))
6721 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6722 maybe_multiple);
6723
6724 /* Update the status of whether giv can derive other givs. This can
6725 change when we pass a label or an insn that updates a biv. */
6726 if (INSN_P (p) || LABEL_P (p))
6727 update_giv_derive (loop, p);
6728 return p;
6729 }
6730 \f
6731 /* Return 1 if X is a valid source for an initial value (or as value being
6732 compared against in an initial test).
6733
6734 X must be either a register or constant and must not be clobbered between
6735 the current insn and the start of the loop.
6736
6737 INSN is the insn containing X. */
6738
6739 static int
6740 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6741 {
6742 if (CONSTANT_P (x))
6743 return 1;
6744
6745 /* Only consider pseudos we know about initialized in insns whose luids
6746 we know. */
6747 if (!REG_P (x)
6748 || REGNO (x) >= max_reg_before_loop)
6749 return 0;
6750
6751 /* Don't use a call-clobbered register across a call which clobbers it. On
6752 some machines, don't use any hard registers at all. */
6753 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6754 && (SMALL_REGISTER_CLASSES
6755 || (call_seen && call_used_regs[REGNO (x)])))
6756 return 0;
6757
6758 /* Don't use registers that have been clobbered before the start of the
6759 loop. */
6760 if (reg_set_between_p (x, insn, loop_start))
6761 return 0;
6762
6763 return 1;
6764 }
6765 \f
6766 /* Scan X for memory refs and check each memory address
6767 as a possible giv. INSN is the insn whose pattern X comes from.
6768 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6769 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6770 more than once in each loop iteration. */
6771
6772 static void
6773 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6774 int not_every_iteration, int maybe_multiple)
6775 {
6776 int i, j;
6777 enum rtx_code code;
6778 const char *fmt;
6779
6780 if (x == 0)
6781 return;
6782
6783 code = GET_CODE (x);
6784 switch (code)
6785 {
6786 case REG:
6787 case CONST_INT:
6788 case CONST:
6789 case CONST_DOUBLE:
6790 case SYMBOL_REF:
6791 case LABEL_REF:
6792 case PC:
6793 case CC0:
6794 case ADDR_VEC:
6795 case ADDR_DIFF_VEC:
6796 case USE:
6797 case CLOBBER:
6798 return;
6799
6800 case MEM:
6801 {
6802 rtx src_reg;
6803 rtx add_val;
6804 rtx mult_val;
6805 rtx ext_val;
6806 int benefit;
6807
6808 /* This code used to disable creating GIVs with mult_val == 1 and
6809 add_val == 0. However, this leads to lost optimizations when
6810 it comes time to combine a set of related DEST_ADDR GIVs, since
6811 this one would not be seen. */
6812
6813 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6814 &mult_val, &ext_val, 1, &benefit,
6815 GET_MODE (x)))
6816 {
6817 /* Found one; record it. */
6818 struct induction *v = xmalloc (sizeof (struct induction));
6819
6820 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6821 add_val, ext_val, benefit, DEST_ADDR,
6822 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6823
6824 v->mem = x;
6825 }
6826 }
6827 return;
6828
6829 default:
6830 break;
6831 }
6832
6833 /* Recursively scan the subexpressions for other mem refs. */
6834
6835 fmt = GET_RTX_FORMAT (code);
6836 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6837 if (fmt[i] == 'e')
6838 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6839 maybe_multiple);
6840 else if (fmt[i] == 'E')
6841 for (j = 0; j < XVECLEN (x, i); j++)
6842 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6843 maybe_multiple);
6844 }
6845 \f
6846 /* Fill in the data about one biv update.
6847 V is the `struct induction' in which we record the biv. (It is
6848 allocated by the caller, with xmalloc.)
6849 INSN is the insn that sets it.
6850 DEST_REG is the biv's reg.
6851
6852 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6853 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6854 being set to INC_VAL.
6855
6856 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6857 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6858 can be executed more than once per iteration. If MAYBE_MULTIPLE
6859 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6860 executed exactly once per iteration. */
6861
6862 static void
6863 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6864 rtx inc_val, rtx mult_val, rtx *location,
6865 int not_every_iteration, int maybe_multiple)
6866 {
6867 struct loop_ivs *ivs = LOOP_IVS (loop);
6868 struct iv_class *bl;
6869
6870 v->insn = insn;
6871 v->src_reg = dest_reg;
6872 v->dest_reg = dest_reg;
6873 v->mult_val = mult_val;
6874 v->add_val = inc_val;
6875 v->ext_dependent = NULL_RTX;
6876 v->location = location;
6877 v->mode = GET_MODE (dest_reg);
6878 v->always_computable = ! not_every_iteration;
6879 v->always_executed = ! not_every_iteration;
6880 v->maybe_multiple = maybe_multiple;
6881 v->same = 0;
6882
6883 /* Add this to the reg's iv_class, creating a class
6884 if this is the first incrementation of the reg. */
6885
6886 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6887 if (bl == 0)
6888 {
6889 /* Create and initialize new iv_class. */
6890
6891 bl = xmalloc (sizeof (struct iv_class));
6892
6893 bl->regno = REGNO (dest_reg);
6894 bl->biv = 0;
6895 bl->giv = 0;
6896 bl->biv_count = 0;
6897 bl->giv_count = 0;
6898
6899 /* Set initial value to the reg itself. */
6900 bl->initial_value = dest_reg;
6901 bl->final_value = 0;
6902 /* We haven't seen the initializing insn yet. */
6903 bl->init_insn = 0;
6904 bl->init_set = 0;
6905 bl->initial_test = 0;
6906 bl->incremented = 0;
6907 bl->eliminable = 0;
6908 bl->nonneg = 0;
6909 bl->reversed = 0;
6910 bl->total_benefit = 0;
6911
6912 /* Add this class to ivs->list. */
6913 bl->next = ivs->list;
6914 ivs->list = bl;
6915
6916 /* Put it in the array of biv register classes. */
6917 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6918 }
6919 else
6920 {
6921 /* Check if location is the same as a previous one. */
6922 struct induction *induction;
6923 for (induction = bl->biv; induction; induction = induction->next_iv)
6924 if (location == induction->location)
6925 {
6926 v->same = induction;
6927 break;
6928 }
6929 }
6930
6931 /* Update IV_CLASS entry for this biv. */
6932 v->next_iv = bl->biv;
6933 bl->biv = v;
6934 bl->biv_count++;
6935 if (mult_val == const1_rtx)
6936 bl->incremented = 1;
6937
6938 if (loop_dump_stream)
6939 loop_biv_dump (v, loop_dump_stream, 0);
6940 }
6941 \f
6942 /* Fill in the data about one giv.
6943 V is the `struct induction' in which we record the giv. (It is
6944 allocated by the caller.)
6945 INSN is the insn that sets it.
6946 BENEFIT estimates the savings from deleting this insn.
6947 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6948 into a register or is used as a memory address.
6949
6950 SRC_REG is the biv reg which the giv is computed from.
6951 DEST_REG is the giv's reg (if the giv is stored in a reg).
6952 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6953 LOCATION points to the place where this giv's value appears in INSN. */
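/* For illustration (register names made up): if the biv is I and the loop
contains "P = I * 4 + BASE", then for the DEST_REG giv P we have
SRC_REG == I, DEST_REG == P, MULT_VAL == (const_int 4) and
ADD_VAL == BASE; BENEFIT estimates the saving from deleting the insn
that computes P.  */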
6954
6955 static void
6956 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6957 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6958 rtx ext_val, int benefit, enum g_types type,
6959 int not_every_iteration, int maybe_multiple, rtx *location)
6960 {
6961 struct loop_ivs *ivs = LOOP_IVS (loop);
6962 struct induction *b;
6963 struct iv_class *bl;
6964 rtx set = single_set (insn);
6965 rtx temp;
6966
6967 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6968 undo the MULT canonicalization that we performed earlier. */
6969 temp = simplify_rtx (add_val);
6970 if (temp
6971 && ! (GET_CODE (add_val) == MULT
6972 && GET_CODE (temp) == ASHIFT))
6973 add_val = temp;
6974
6975 v->insn = insn;
6976 v->src_reg = src_reg;
6977 v->giv_type = type;
6978 v->dest_reg = dest_reg;
6979 v->mult_val = mult_val;
6980 v->add_val = add_val;
6981 v->ext_dependent = ext_val;
6982 v->benefit = benefit;
6983 v->location = location;
6984 v->cant_derive = 0;
6985 v->combined_with = 0;
6986 v->maybe_multiple = maybe_multiple;
6987 v->maybe_dead = 0;
6988 v->derive_adjustment = 0;
6989 v->same = 0;
6990 v->ignore = 0;
6991 v->new_reg = 0;
6992 v->final_value = 0;
6993 v->same_insn = 0;
6994 v->auto_inc_opt = 0;
6995 v->shared = 0;
6996
6997 /* The v->always_computable field is used in update_giv_derive, to
6998 determine whether a giv can be used to derive another giv. For a
6999 DEST_REG giv, INSN computes a new value for the giv, so its value
7000 isn't computable if INSN isn't executed every iteration.
7001 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
7002 it does not compute a new value. Hence the value is always computable
7003 regardless of whether INSN is executed each iteration. */
7004
7005 if (type == DEST_ADDR)
7006 v->always_computable = 1;
7007 else
7008 v->always_computable = ! not_every_iteration;
7009
7010 v->always_executed = ! not_every_iteration;
7011
7012 if (type == DEST_ADDR)
7013 {
7014 v->mode = GET_MODE (*location);
7015 v->lifetime = 1;
7016 }
7017 else /* type == DEST_REG */
7018 {
7019 v->mode = GET_MODE (SET_DEST (set));
7020
7021 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
7022
7023 /* If the lifetime is zero, it means that this register is
7024 really a dead store. So mark this as a giv that can be
7025 ignored. This will not prevent the biv from being eliminated. */
7026 if (v->lifetime == 0)
7027 v->ignore = 1;
7028
7029 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7030 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7031 }
7032
7033 /* Add the giv to the class of givs computed from one biv. */
7034
7035 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
7036 gcc_assert (bl);
7037 v->next_iv = bl->giv;
7038 bl->giv = v;
7039
7040 /* Don't count DEST_ADDR. This is supposed to count the number of
7041 insns that calculate givs. */
7042 if (type == DEST_REG)
7043 bl->giv_count++;
7044 bl->total_benefit += benefit;
7045
7046 if (type == DEST_ADDR)
7047 {
7048 v->replaceable = 1;
7049 v->not_replaceable = 0;
7050 }
7051 else
7052 {
7053 /* The giv can be replaced outright by the reduced register only if all
7054 of the following conditions are true:
7055 - the insn that sets the giv is always executed on any iteration
7056 on which the giv is used at all
7057 (there are two ways to deduce this:
7058 either the insn is executed on every iteration,
7059 or all uses follow that insn in the same basic block),
7060 - the giv is not used outside the loop
7061 - no assignments to the biv occur during the giv's lifetime. */
7062
7063 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7064 /* Previous line always fails if INSN was moved by loop opt. */
7065 && REGNO_LAST_LUID (REGNO (dest_reg))
7066 < INSN_LUID (loop->end)
7067 && (! not_every_iteration
7068 || last_use_this_basic_block (dest_reg, insn)))
7069 {
7070 /* Now check that there are no assignments to the biv within the
7071 giv's lifetime. This requires two separate checks. */
7072
7073 /* Check each biv update, and fail if any are between the first
7074 and last use of the giv.
7075
7076 If this loop contains an inner loop that was unrolled, then
7077 the insn modifying the biv may have been emitted by the loop
7078 unrolling code, and hence does not have a valid luid. Just
7079 mark the biv as not replaceable in this case. It is not very
7080 useful as a biv, because it is used in two different loops.
7081 It is very unlikely that we would be able to optimize the giv
7082 using this biv anyway. */
7083
7084 v->replaceable = 1;
7085 v->not_replaceable = 0;
7086 for (b = bl->biv; b; b = b->next_iv)
7087 {
7088 if (INSN_UID (b->insn) >= max_uid_for_loop
7089 || ((INSN_LUID (b->insn)
7090 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7091 && (INSN_LUID (b->insn)
7092 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7093 {
7094 v->replaceable = 0;
7095 v->not_replaceable = 1;
7096 break;
7097 }
7098 }
7099
7100 /* If there are any backwards branches that go from after the
7101 biv update to before it, then this giv is not replaceable. */
7102 if (v->replaceable)
7103 for (b = bl->biv; b; b = b->next_iv)
7104 if (back_branch_in_range_p (loop, b->insn))
7105 {
7106 v->replaceable = 0;
7107 v->not_replaceable = 1;
7108 break;
7109 }
7110 }
7111 else
7112 {
7113 /* May still be replaceable; we don't have enough info here to
7114 decide. */
7115 v->replaceable = 0;
7116 v->not_replaceable = 0;
7117 }
7118 }
7119
7120 /* Record whether the add_val contains a const_int, for later use by
7121 combine_givs. */
7122 {
7123 rtx tem = add_val;
7124
7125 v->no_const_addval = 1;
7126 if (tem == const0_rtx)
7127 ;
7128 else if (CONSTANT_P (add_val))
7129 v->no_const_addval = 0;
7130 if (GET_CODE (tem) == PLUS)
7131 {
7132 while (1)
7133 {
7134 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7135 tem = XEXP (tem, 0);
7136 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7137 tem = XEXP (tem, 1);
7138 else
7139 break;
7140 }
7141 if (CONSTANT_P (XEXP (tem, 1)))
7142 v->no_const_addval = 0;
7143 }
7144 }
7145
7146 if (loop_dump_stream)
7147 loop_giv_dump (v, loop_dump_stream, 0);
7148 }
7149
7150 /* Try to calculate the final value of the giv, the value it will have at
7151 the end of the loop. If we can do it, return that value. */
7152
7153 static rtx
7154 final_giv_value (const struct loop *loop, struct induction *v)
7155 {
7156 struct loop_ivs *ivs = LOOP_IVS (loop);
7157 struct iv_class *bl;
7158 rtx insn;
7159 rtx increment, tem;
7160 rtx seq;
7161 rtx loop_end = loop->end;
7162 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7163
7164 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7165
7166 /* The final value for givs which depend on reversed bivs must be calculated
7167 differently than for ordinary givs. In this case, there is already an
7168 insn after the loop which sets this giv's final value (if necessary),
7169 and there are no other loop exits, so we can return any value. */
7170 if (bl->reversed)
7171 {
7172 if (loop_dump_stream)
7173 fprintf (loop_dump_stream,
7174 "Final giv value for %d, depends on reversed biv\n",
7175 REGNO (v->dest_reg));
7176 return const0_rtx;
7177 }
7178
7179 /* Try to calculate the final value as a function of the biv it depends
7180 upon. The only exit from the loop must be the fall through at the bottom
7181 and the insn that sets the giv must be executed on every iteration
7182 (otherwise the giv may not have its final value when the loop exits). */
7183
7184 /* ??? Can calculate the final giv value by subtracting off the
7185 extra biv increments times the giv's mult_val. The loop must have
7186 only one exit for this to work, but the number of loop iterations does
7187 not need to be known. */
7188
7189 if (n_iterations != 0
7190 && ! loop->exit_count
7191 && v->always_executed)
7192 {
7193 /* ?? It is tempting to use the biv's value here since these insns will
7194 be put after the loop, and hence the biv will have its final value
7195 then. However, this fails if the biv is subsequently eliminated.
7196 Perhaps determine whether biv's are eliminable before trying to
7197 determine whether giv's are replaceable so that we can use the
7198 biv value here if it is not eliminable. */
7199
7200 /* We are emitting code after the end of the loop, so we must make
7201 sure that bl->initial_value is still valid then. It will still
7202 be valid if it is invariant. */
7203
7204 increment = biv_total_increment (bl);
7205
7206 if (increment && loop_invariant_p (loop, increment)
7207 && loop_invariant_p (loop, bl->initial_value))
7208 {
7209 /* Can calculate the loop exit value of its biv as
7210 (n_iterations * increment) + initial_value */
7211
7212 /* The loop exit value of the giv is then
7213 (final_biv_value - extra increments) * mult_val + add_val.
7214 The extra increments are any increments to the biv which
7215 occur in the loop after the giv's value is calculated.
7216 We must search from the insn that sets the giv to the end
7217 of the loop to calculate this value. */
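/* A worked example with made-up numbers: if the biv starts at 0, is
incremented by 4 once per iteration and the loop runs 10 times, its
exit value is 10 * 4 + 0 = 40.  For a giv computed as biv * 2 + 6
just before the (single) biv increment, that increment is an extra
one, so the giv's exit value is (40 - 4) * 2 + 6 = 78.  */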
7218
7219 /* Put the final biv value in tem. */
7220 tem = gen_reg_rtx (v->mode);
7221 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7222 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7223 GEN_INT (n_iterations),
7224 extend_value_for_giv (v, bl->initial_value),
7225 tem);
7226
7227 /* Subtract off extra increments as we find them. */
7228 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7229 insn = NEXT_INSN (insn))
7230 {
7231 struct induction *biv;
7232
7233 for (biv = bl->biv; biv; biv = biv->next_iv)
7234 if (biv->insn == insn)
7235 {
7236 start_sequence ();
7237 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7238 biv->add_val, NULL_RTX, 0,
7239 OPTAB_LIB_WIDEN);
7240 seq = get_insns ();
7241 end_sequence ();
7242 loop_insn_sink (loop, seq);
7243 }
7244 }
7245
7246 /* Now calculate the giv's final value. */
7247 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7248
7249 if (loop_dump_stream)
7250 fprintf (loop_dump_stream,
7251 "Final giv value for %d, calc from biv's value.\n",
7252 REGNO (v->dest_reg));
7253
7254 return tem;
7255 }
7256 }
7257
7258 /* Replaceable giv's should never reach here. */
7259 gcc_assert (!v->replaceable);
7260
7261 /* Check to see if the biv is dead at all loop exits. */
7262 if (reg_dead_after_loop (loop, v->dest_reg))
7263 {
7264 if (loop_dump_stream)
7265 fprintf (loop_dump_stream,
7266 "Final giv value for %d, giv dead after loop exit.\n",
7267 REGNO (v->dest_reg));
7268
7269 return const0_rtx;
7270 }
7271
7272 return 0;
7273 }
7274
7275 /* All this does is determine whether a giv can be made replaceable because
7276 its final value can be calculated. This code can not be part of record_giv
7277 above, because final_giv_value requires that the number of loop iterations
7278 be known, and that can not be accurately calculated until after all givs
7279 have been identified. */
7280
7281 static void
7282 check_final_value (const struct loop *loop, struct induction *v)
7283 {
7284 rtx final_value = 0;
7285
7286 /* DEST_ADDR givs will never reach here, because they are always marked
7287 replaceable above in record_giv. */
7288
7289 /* The giv can be replaced outright by the reduced register only if all
7290 of the following conditions are true:
7291 - the insn that sets the giv is always executed on any iteration
7292 on which the giv is used at all
7293 (there are two ways to deduce this:
7294 either the insn is executed on every iteration,
7295 or all uses follow that insn in the same basic block),
7296 - its final value can be calculated (this condition is different
7297 than the one above in record_giv)
7298 - it's not used before it is set
7299 - no assignments to the biv occur during the giv's lifetime. */
7300
7301 #if 0
7302 /* This is only called now when replaceable is known to be false. */
7303 /* Clear replaceable, so that it won't confuse final_giv_value. */
7304 v->replaceable = 0;
7305 #endif
7306
7307 if ((final_value = final_giv_value (loop, v))
7308 && (v->always_executed
7309 || last_use_this_basic_block (v->dest_reg, v->insn)))
7310 {
7311 int biv_increment_seen = 0, before_giv_insn = 0;
7312 rtx p = v->insn;
7313 rtx last_giv_use;
7314
7315 v->replaceable = 1;
7316 v->not_replaceable = 0;
7317
7318 /* When trying to determine whether or not a biv increment occurs
7319 during the lifetime of the giv, we can ignore uses of the variable
7320 outside the loop because final_value is true. Hence we can not
7321 use regno_last_uid and regno_first_uid as above in record_giv. */
7322
7323 /* Search the loop to determine whether any assignments to the
7324 biv occur during the giv's lifetime. Start with the insn
7325 that sets the giv, and search around the loop until we come
7326 back to that insn again.
7327
7328 Also fail if there is a jump within the giv's lifetime that jumps
7329 to somewhere outside the lifetime but still within the loop. This
7330 catches spaghetti code where the execution order is not linear, and
7331 hence the above test fails. Here we assume that the giv lifetime
7332 does not extend from one iteration of the loop to the next, so as
7333 to make the test easier. Since the lifetime isn't known yet,
7334 this requires two loops. See also record_giv above. */
7335
7336 last_giv_use = v->insn;
7337
7338 while (1)
7339 {
7340 p = NEXT_INSN (p);
7341 if (p == loop->end)
7342 {
7343 before_giv_insn = 1;
7344 p = NEXT_INSN (loop->start);
7345 }
7346 if (p == v->insn)
7347 break;
7348
7349 if (INSN_P (p))
7350 {
7351 /* It is possible for the BIV increment to use the GIV if we
7352 have a cycle. Thus we must be sure to check each insn for
7353 both BIV and GIV uses, and we must check for BIV uses
7354 first. */
7355
7356 if (! biv_increment_seen
7357 && reg_set_p (v->src_reg, PATTERN (p)))
7358 biv_increment_seen = 1;
7359
7360 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7361 {
7362 if (biv_increment_seen || before_giv_insn)
7363 {
7364 v->replaceable = 0;
7365 v->not_replaceable = 1;
7366 break;
7367 }
7368 last_giv_use = p;
7369 }
7370 }
7371 }
7372
7373 /* Now that the lifetime of the giv is known, check for branches
7374 from within the lifetime to outside the lifetime if it is still
7375 replaceable. */
7376
7377 if (v->replaceable)
7378 {
7379 p = v->insn;
7380 while (1)
7381 {
7382 p = NEXT_INSN (p);
7383 if (p == loop->end)
7384 p = NEXT_INSN (loop->start);
7385 if (p == last_giv_use)
7386 break;
7387
7388 if (JUMP_P (p) && JUMP_LABEL (p)
7389 && LABEL_NAME (JUMP_LABEL (p))
7390 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7391 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7392 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7393 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7394 {
7395 v->replaceable = 0;
7396 v->not_replaceable = 1;
7397
7398 if (loop_dump_stream)
7399 fprintf (loop_dump_stream,
7400 "Found branch outside giv lifetime.\n");
7401
7402 break;
7403 }
7404 }
7405 }
7406
7407 /* If it is replaceable, then save the final value. */
7408 if (v->replaceable)
7409 v->final_value = final_value;
7410 }
7411
7412 if (loop_dump_stream && v->replaceable)
7413 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7414 INSN_UID (v->insn), REGNO (v->dest_reg));
7415 }
7416 \f
7417 /* Update the status of whether a giv can derive other givs.
7418
7419 We need to do something special if there is or may be an update to the biv
7420 between the time the giv is defined and the time it is used to derive
7421 another giv.
7422
7423 In addition, a giv that is only conditionally set is not allowed to
7424 derive another giv once a label has been passed.
7425
7426 The cases we look at are when a label or an update to a biv is passed. */
7427
7428 static void
7429 update_giv_derive (const struct loop *loop, rtx p)
7430 {
7431 struct loop_ivs *ivs = LOOP_IVS (loop);
7432 struct iv_class *bl;
7433 struct induction *biv, *giv;
7434 rtx tem;
7435 int dummy;
7436
7437 /* Search all IV classes, then all bivs, and finally all givs.
7438
7439 There are three cases we are concerned with. First we have the situation
7440 of a giv that is only updated conditionally. In that case, it may not
7441 derive any givs after a label is passed.
7442
7443 The second case is when a biv update occurs, or may occur, after the
7444 definition of a giv. For certain biv updates (see below) that are
7445 known to occur between the giv definition and use, we can adjust the
7446 giv definition. For others, or when the biv update is conditional,
7447 we must prevent the giv from deriving any other givs. There are two
7448 sub-cases within this case.
7449
7450 If this is a label, we are concerned with any biv update that is done
7451 conditionally, since it may be done after the giv is defined followed by
7452 a branch here (actually, we need to pass both a jump and a label, but
7453 this extra tracking doesn't seem worth it).
7454
7455 If this is a jump, we are concerned about any biv update that may be
7456 executed multiple times. We are actually only concerned about
7457 backward jumps, but it is probably not worth performing the test
7458 on the jump again here.
7459
7460 If this is a biv update, we must adjust the giv status to show that a
7461 subsequent biv update was performed. If this adjustment cannot be done,
7462 the giv cannot derive further givs. */
7463
7464 for (bl = ivs->list; bl; bl = bl->next)
7465 for (biv = bl->biv; biv; biv = biv->next_iv)
7466 if (LABEL_P (p) || JUMP_P (p)
7467 || biv->insn == p)
7468 {
7469 /* Skip if location is the same as a previous one. */
7470 if (biv->same)
7471 continue;
7472
7473 for (giv = bl->giv; giv; giv = giv->next_iv)
7474 {
7475 /* If cant_derive is already true, there is no point in
7476 checking all of these conditions again. */
7477 if (giv->cant_derive)
7478 continue;
7479
7480 /* If this giv is conditionally set and we have passed a label,
7481 it cannot derive anything. */
7482 if (LABEL_P (p) && ! giv->always_computable)
7483 giv->cant_derive = 1;
7484
7485 /* Skip givs that have mult_val == 0, since
7486 they are really invariants. Also skip those that are
7487 replaceable, since we know their lifetime doesn't contain
7488 any biv update. */
7489 else if (giv->mult_val == const0_rtx || giv->replaceable)
7490 continue;
7491
7492 /* The only way we can allow this giv to derive another
7493 is if this is a biv increment and we can form the product
7494 of biv->add_val and giv->mult_val. In this case, we will
7495 be able to compute a compensation. */
7496 else if (biv->insn == p)
7497 {
7498 rtx ext_val_dummy;
7499
7500 tem = 0;
7501 if (biv->mult_val == const1_rtx)
7502 tem = simplify_giv_expr (loop,
7503 gen_rtx_MULT (giv->mode,
7504 biv->add_val,
7505 giv->mult_val),
7506 &ext_val_dummy, &dummy);
7507
7508 if (tem && giv->derive_adjustment)
7509 tem = simplify_giv_expr
7510 (loop,
7511 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7512 &ext_val_dummy, &dummy);
7513
7514 if (tem)
7515 giv->derive_adjustment = tem;
7516 else
7517 giv->cant_derive = 1;
7518 }
7519 else if ((LABEL_P (p) && ! biv->always_computable)
7520 || (JUMP_P (p) && biv->maybe_multiple))
7521 giv->cant_derive = 1;
7522 }
7523 }
7524 }
7525 \f
7526 /* Check whether an insn is an increment legitimate for a basic induction var.
7527 X is the source of insn P, or a part of it.
7528 MODE is the mode in which X should be interpreted.
7529
7530 DEST_REG is the putative biv, also the destination of the insn.
7531 We accept patterns of these forms:
7532 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7533 REG = INVARIANT + REG
7534
7535 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7536 store the additive term into *INC_VAL, and store the place where
7537 we found the additive term into *LOCATION.
7538
7539 If X is an assignment of an invariant into DEST_REG, we set
7540 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7541
7542 We also want to detect a BIV when it corresponds to a variable
7543 whose mode was promoted. In that case, an increment
7544 of the variable may be a PLUS that adds a SUBREG of that variable to
7545 an invariant and then sign- or zero-extends the result of the PLUS
7546 into the variable.
7547
7548 Most GIVs in such cases will be in the promoted mode, since that is
7549 probably the natural computation mode (and almost certainly the mode
7550 used for addresses) on the machine. So we view the pseudo-reg containing
7551 the variable as the BIV, as if it were simply incremented.
7552
7553 Note that treating the entire pseudo as a BIV will result in making
7554 simple increments to any GIVs based on it. However, if the variable
7555 overflows in its declared mode but not its promoted mode, the result will
7556 be incorrect. This is acceptable if the variable is signed, since
7557 overflows in such cases are undefined, but not if it is unsigned, since
7558 those overflows are defined. So we only check for SIGN_EXTEND and
7559 not ZERO_EXTEND.
7560
7561 If we cannot find a biv, we return 0. */
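/* For illustration (the register number and increment are made up), a
typical biv increment insn has a SET_SRC of the form

(plus:SI (reg:SI 100) (const_int 4))

where (reg:SI 100) is DEST_REG; for it we return 1 with
*MULT_VAL == const1_rtx, *INC_VAL == (const_int 4), and *LOCATION
pointing at the slot holding the (const_int 4).  */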
7562
7563 static int
7564 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7565 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7566 rtx **location)
7567 {
7568 enum rtx_code code;
7569 rtx *argp, arg;
7570 rtx insn, set = 0, last, inc;
7571
7572 code = GET_CODE (x);
7573 *location = NULL;
7574 switch (code)
7575 {
7576 case PLUS:
7577 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7578 || (GET_CODE (XEXP (x, 0)) == SUBREG
7579 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7580 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7581 {
7582 argp = &XEXP (x, 1);
7583 }
7584 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7585 || (GET_CODE (XEXP (x, 1)) == SUBREG
7586 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7587 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7588 {
7589 argp = &XEXP (x, 0);
7590 }
7591 else
7592 return 0;
7593
7594 arg = *argp;
7595 if (loop_invariant_p (loop, arg) != 1)
7596 return 0;
7597
7598 /* convert_modes can emit new instructions, e.g. when arg is a loop
7599 invariant MEM and dest_reg has a different mode.
7600 These instructions would be emitted after the end of the function
7601 and then *inc_val would be an uninitialized pseudo.
7602 Detect this and bail in this case.
7603 Other alternatives to solve this would be: introduce a convert_modes
7604 variant which is allowed to fail but not allowed to emit new
7605 instructions; emit these instructions before the loop start and let
7606 them be garbage collected if *inc_val is never used; or save the
7607 *inc_val initialization sequence generated here and emit it at some
7608 suitable place when *inc_val is actually used. */
7609 last = get_last_insn ();
7610 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7611 if (get_last_insn () != last)
7612 {
7613 delete_insns_since (last);
7614 return 0;
7615 }
7616
7617 *inc_val = inc;
7618 *mult_val = const1_rtx;
7619 *location = argp;
7620 return 1;
7621
7622 case SUBREG:
7623 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
7624 handle addition of promoted variables.
7625 ??? The comment at the start of this function is wrong: promoted
7626 variable increments don't look like it says they do. */
7627 return basic_induction_var (loop, SUBREG_REG (x),
7628 GET_MODE (SUBREG_REG (x)),
7629 dest_reg, p, inc_val, mult_val, location);
7630
7631 case REG:
7632 /* If this register is assigned in a previous insn, look at its
7633 source, but don't go outside the loop or past a label. */
7634
7635 /* If this sets a register to itself, we would repeat any previous
7636 biv increment if we applied this strategy blindly. */
7637 if (rtx_equal_p (dest_reg, x))
7638 return 0;
7639
7640 insn = p;
7641 while (1)
7642 {
7643 rtx dest;
7644 do
7645 {
7646 insn = PREV_INSN (insn);
7647 }
7648 while (insn && NOTE_P (insn)
7649 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7650
7651 if (!insn)
7652 break;
7653 set = single_set (insn);
7654 if (set == 0)
7655 break;
7656 dest = SET_DEST (set);
7657 if (dest == x
7658 || (GET_CODE (dest) == SUBREG
7659 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7660 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7661 && SUBREG_REG (dest) == x))
7662 return basic_induction_var (loop, SET_SRC (set),
7663 (GET_MODE (SET_SRC (set)) == VOIDmode
7664 ? GET_MODE (x)
7665 : GET_MODE (SET_SRC (set))),
7666 dest_reg, insn,
7667 inc_val, mult_val, location);
7668
7669 while (GET_CODE (dest) == SUBREG
7670 || GET_CODE (dest) == ZERO_EXTRACT
7671 || GET_CODE (dest) == STRICT_LOW_PART)
7672 dest = XEXP (dest, 0);
7673 if (dest == x)
7674 break;
7675 }
7676 /* Fall through. */
7677
7678 /* Can accept constant setting of biv only when inside the innermost loop.
7679 Otherwise, a biv of an inner loop may be incorrectly recognized
7680 as a biv of the outer loop,
7681 causing code to be moved INTO the inner loop. */
7682 case MEM:
7683 if (loop_invariant_p (loop, x) != 1)
7684 return 0;
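/* Fall through. */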
7685 case CONST_INT:
7686 case SYMBOL_REF:
7687 case CONST:
7688 /* convert_modes dies if we try to convert to or from CCmode, so just
7689 exclude that case. It is very unlikely that a condition code value
7690 would be a useful iterator anyway. convert_modes dies if we try to
7691 convert a float mode to non-float or vice versa too. */
7692 if (loop->level == 1
7693 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7694 && GET_MODE_CLASS (mode) != MODE_CC)
7695 {
7696 /* Possible bug here? Perhaps we don't know the mode of X. */
7697 last = get_last_insn ();
7698 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7699 if (get_last_insn () != last)
7700 {
7701 delete_insns_since (last);
7702 return 0;
7703 }
7704
7705 *inc_val = inc;
7706 *mult_val = const0_rtx;
7707 return 1;
7708 }
7709 else
7710 return 0;
7711
7712 case SIGN_EXTEND:
7713 /* Ignore this BIV if signed arithmetic overflow is defined. */
7714 if (flag_wrapv)
7715 return 0;
7716 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7717 dest_reg, p, inc_val, mult_val, location);
7718
7719 case ASHIFTRT:
7720 /* Similar, since this can be a sign extension. */
7721 for (insn = PREV_INSN (p);
7722 (insn && NOTE_P (insn)
7723 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7724 insn = PREV_INSN (insn))
7725 ;
7726
7727 if (insn)
7728 set = single_set (insn);
7729
7730 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7731 && set && SET_DEST (set) == XEXP (x, 0)
7732 && GET_CODE (XEXP (x, 1)) == CONST_INT
7733 && INTVAL (XEXP (x, 1)) >= 0
7734 && GET_CODE (SET_SRC (set)) == ASHIFT
7735 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7736 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7737 GET_MODE (XEXP (x, 0)),
7738 dest_reg, insn, inc_val, mult_val,
7739 location);
7740 return 0;
7741
7742 default:
7743 return 0;
7744 }
7745 }
7746 \f
7747 /* A general induction variable (giv) is any quantity that is a linear
7748 function of a basic induction variable,
7749 i.e. giv = biv * mult_val + add_val.
7750 The coefficients can be any loop invariant quantity.
7751 A giv need not be computed directly from the biv;
7752 it can be computed by way of other givs. */
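/* For example (made-up registers): if I is a biv incremented by 1, then
J = I * 4 and K = J + 32 are both givs; K has mult_val 4 and add_val 32,
since it is still a linear function of I even though it is computed
from the giv J rather than directly from the biv.  */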
7753
7754 /* Determine whether X computes a giv.
7755 If it does, return a nonzero value
7756 which is the benefit from eliminating the computation of X;
7757 set *SRC_REG to the register of the biv that it is computed from;
7758 set *ADD_VAL and *MULT_VAL to the coefficients,
7759 such that the value of X is biv * mult + add. */
7760
7761 static int
7762 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7763 rtx *add_val, rtx *mult_val, rtx *ext_val,
7764 int is_addr, int *pbenefit,
7765 enum machine_mode addr_mode)
7766 {
7767 struct loop_ivs *ivs = LOOP_IVS (loop);
7768 rtx orig_x = x;
7769
7770 /* If this is an invariant, forget it, it isn't a giv. */
7771 if (loop_invariant_p (loop, x) == 1)
7772 return 0;
7773
7774 *pbenefit = 0;
7775 *ext_val = NULL_RTX;
7776 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7777 if (x == 0)
7778 return 0;
7779
7780 switch (GET_CODE (x))
7781 {
7782 case USE:
7783 case CONST_INT:
7784 /* Since this is now an invariant and wasn't before, it must be a giv
7785 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7786 with. */
7787 *src_reg = ivs->list->biv->dest_reg;
7788 *mult_val = const0_rtx;
7789 *add_val = x;
7790 break;
7791
7792 case REG:
7793 /* This is equivalent to a BIV. */
7794 *src_reg = x;
7795 *mult_val = const1_rtx;
7796 *add_val = const0_rtx;
7797 break;
7798
7799 case PLUS:
7800 /* Either (plus (biv) (invar)) or
7801 (plus (mult (biv) (invar_1)) (invar_2)). */
7802 if (GET_CODE (XEXP (x, 0)) == MULT)
7803 {
7804 *src_reg = XEXP (XEXP (x, 0), 0);
7805 *mult_val = XEXP (XEXP (x, 0), 1);
7806 }
7807 else
7808 {
7809 *src_reg = XEXP (x, 0);
7810 *mult_val = const1_rtx;
7811 }
7812 *add_val = XEXP (x, 1);
7813 break;
7814
7815 case MULT:
7816 /* ADD_VAL is zero. */
7817 *src_reg = XEXP (x, 0);
7818 *mult_val = XEXP (x, 1);
7819 *add_val = const0_rtx;
7820 break;
7821
7822 default:
7823 gcc_unreachable ();
7824 }
7825
7826 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
7827 unless they are CONST_INTs). */
7828 if (GET_CODE (*add_val) == USE)
7829 *add_val = XEXP (*add_val, 0);
7830 if (GET_CODE (*mult_val) == USE)
7831 *mult_val = XEXP (*mult_val, 0);
7832
7833 if (is_addr)
7834 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7835 else
7836 *pbenefit += rtx_cost (orig_x, SET);
7837
7838 /* Always return true if this is a giv so it will be detected as such,
7839 even if the benefit is zero or negative. This allows elimination
7840 of bivs that might otherwise not be eliminated. */
7841 return 1;
7842 }
7843 \f
7844 /* Given an expression, X, try to form it as a linear function of a biv.
7845 We will canonicalize it to be of the form
7846 (plus (mult (BIV) (invar_1))
7847 (invar_2))
7848 with possible degeneracies.
7849
7850 The invariant expressions must each be of a form that can be used as a
7851 machine operand. We surround them with a USE rtx (a hack, but localized
7852 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7853 routine; it is the caller's responsibility to strip them.
7854
7855 If no such canonicalization is possible (i.e., two biv's are used or an
7856 expression that is neither invariant nor a biv or giv), this routine
7857 returns 0.
7858
7859 For a nonzero return, the result will have a code of CONST_INT, USE,
7860 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7861
7862 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
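/* For illustration (made-up operands): given a biv I and loop invariants
A and B, the expression (I + A) * 4 + B is canonicalized to roughly

(plus (mult I (const_int 4))
(use (plus (mult A (const_int 4)) B)))

i.e. invar_1 is 4 and invar_2 is A*4 + B, the latter wrapped in a USE
because it is not a CONST_INT.  */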
7863
7864 static rtx sge_plus (enum machine_mode, rtx, rtx);
7865 static rtx sge_plus_constant (rtx, rtx);
7866
7867 static rtx
7868 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7869 {
7870 struct loop_ivs *ivs = LOOP_IVS (loop);
7871 struct loop_regs *regs = LOOP_REGS (loop);
7872 enum machine_mode mode = GET_MODE (x);
7873 rtx arg0, arg1;
7874 rtx tem;
7875
7876 /* If this is not an integer mode, or if we cannot do arithmetic in this
7877 mode, this can't be a giv. */
7878 if (mode != VOIDmode
7879 && (GET_MODE_CLASS (mode) != MODE_INT
7880 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7881 return NULL_RTX;
7882
7883 switch (GET_CODE (x))
7884 {
7885 case PLUS:
7886 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7887 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7888 if (arg0 == 0 || arg1 == 0)
7889 return NULL_RTX;
7890
7891 /* Put constant last, CONST_INT last if both constant. */
7892 if ((GET_CODE (arg0) == USE
7893 || GET_CODE (arg0) == CONST_INT)
7894 && ! ((GET_CODE (arg0) == USE
7895 && GET_CODE (arg1) == USE)
7896 || GET_CODE (arg1) == CONST_INT))
7897 tem = arg0, arg0 = arg1, arg1 = tem;
7898
7899 /* Handle addition of zero, then addition of an invariant. */
7900 if (arg1 == const0_rtx)
7901 return arg0;
7902 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7903 switch (GET_CODE (arg0))
7904 {
7905 case CONST_INT:
7906 case USE:
7907 /* Adding two invariants must result in an invariant, so enclose
7908 addition operation inside a USE and return it. */
7909 if (GET_CODE (arg0) == USE)
7910 arg0 = XEXP (arg0, 0);
7911 if (GET_CODE (arg1) == USE)
7912 arg1 = XEXP (arg1, 0);
7913
7914 if (GET_CODE (arg0) == CONST_INT)
7915 tem = arg0, arg0 = arg1, arg1 = tem;
7916 if (GET_CODE (arg1) == CONST_INT)
7917 tem = sge_plus_constant (arg0, arg1);
7918 else
7919 tem = sge_plus (mode, arg0, arg1);
7920
7921 if (GET_CODE (tem) != CONST_INT)
7922 tem = gen_rtx_USE (mode, tem);
7923 return tem;
7924
7925 case REG:
7926 case MULT:
7927 /* biv + invar or mult + invar. Return sum. */
7928 return gen_rtx_PLUS (mode, arg0, arg1);
7929
7930 case PLUS:
7931 /* (a + invar_1) + invar_2. Associate. */
7932 return
7933 simplify_giv_expr (loop,
7934 gen_rtx_PLUS (mode,
7935 XEXP (arg0, 0),
7936 gen_rtx_PLUS (mode,
7937 XEXP (arg0, 1),
7938 arg1)),
7939 ext_val, benefit);
7940
7941 default:
7942 gcc_unreachable ();
7943 }
7944
7945 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7946 MULT to reduce cases. */
7947 if (REG_P (arg0))
7948 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7949 if (REG_P (arg1))
7950 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7951
7952 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7953 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7954 Recurse to associate the second PLUS. */
7955 if (GET_CODE (arg1) == MULT)
7956 tem = arg0, arg0 = arg1, arg1 = tem;
7957
7958 if (GET_CODE (arg1) == PLUS)
7959 return
7960 simplify_giv_expr (loop,
7961 gen_rtx_PLUS (mode,
7962 gen_rtx_PLUS (mode, arg0,
7963 XEXP (arg1, 0)),
7964 XEXP (arg1, 1)),
7965 ext_val, benefit);
7966
7967 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7968 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7969 return NULL_RTX;
7970
7971 if (!rtx_equal_p (arg0, arg1))
7972 return NULL_RTX;
7973
7974 return simplify_giv_expr (loop,
7975 gen_rtx_MULT (mode,
7976 XEXP (arg0, 0),
7977 gen_rtx_PLUS (mode,
7978 XEXP (arg0, 1),
7979 XEXP (arg1, 1))),
7980 ext_val, benefit);
7981
7982 case MINUS:
7983 /* Handle "a - b" as "a + b * (-1)". */
7984 return simplify_giv_expr (loop,
7985 gen_rtx_PLUS (mode,
7986 XEXP (x, 0),
7987 gen_rtx_MULT (mode,
7988 XEXP (x, 1),
7989 constm1_rtx)),
7990 ext_val, benefit);
7991
7992 case MULT:
7993 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7994 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7995 if (arg0 == 0 || arg1 == 0)
7996 return NULL_RTX;
7997
7998 /* Put constant last, CONST_INT last if both constant. */
7999 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
8000 && GET_CODE (arg1) != CONST_INT)
8001 tem = arg0, arg0 = arg1, arg1 = tem;
8002
8003 /* If second argument is not now constant, not giv. */
8004 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
8005 return NULL_RTX;
8006
8007 /* Handle multiply by 0 or 1. */
8008 if (arg1 == const0_rtx)
8009 return const0_rtx;
8010
8011 else if (arg1 == const1_rtx)
8012 return arg0;
8013
8014 switch (GET_CODE (arg0))
8015 {
8016 case REG:
8017 /* biv * invar. Done. */
8018 return gen_rtx_MULT (mode, arg0, arg1);
8019
8020 case CONST_INT:
8021 /* Product of two constants. */
8022 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
8023
8024 case USE:
8025 /* invar * invar is a giv, but attempt to simplify it somehow. */
8026 if (GET_CODE (arg1) != CONST_INT)
8027 return NULL_RTX;
8028
8029 arg0 = XEXP (arg0, 0);
8030 if (GET_CODE (arg0) == MULT)
8031 {
8032 /* (invar_0 * invar_1) * invar_2. Associate. */
8033 return simplify_giv_expr (loop,
8034 gen_rtx_MULT (mode,
8035 XEXP (arg0, 0),
8036 gen_rtx_MULT (mode,
8037 XEXP (arg0,
8038 1),
8039 arg1)),
8040 ext_val, benefit);
8041 }
8042 /* Propagate the MULT expressions to the innermost nodes. */
8043 else if (GET_CODE (arg0) == PLUS)
8044 {
8045 /* (invar_0 + invar_1) * invar_2. Distribute. */
8046 return simplify_giv_expr (loop,
8047 gen_rtx_PLUS (mode,
8048 gen_rtx_MULT (mode,
8049 XEXP (arg0,
8050 0),
8051 arg1),
8052 gen_rtx_MULT (mode,
8053 XEXP (arg0,
8054 1),
8055 arg1)),
8056 ext_val, benefit);
8057 }
8058 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8059
8060 case MULT:
8061 /* (a * invar_1) * invar_2. Associate. */
8062 return simplify_giv_expr (loop,
8063 gen_rtx_MULT (mode,
8064 XEXP (arg0, 0),
8065 gen_rtx_MULT (mode,
8066 XEXP (arg0, 1),
8067 arg1)),
8068 ext_val, benefit);
8069
8070 case PLUS:
8071 /* (a + invar_1) * invar_2. Distribute. */
8072 return simplify_giv_expr (loop,
8073 gen_rtx_PLUS (mode,
8074 gen_rtx_MULT (mode,
8075 XEXP (arg0, 0),
8076 arg1),
8077 gen_rtx_MULT (mode,
8078 XEXP (arg0, 1),
8079 arg1)),
8080 ext_val, benefit);
8081
8082 default:
8083 gcc_unreachable ();
8084 }
8085
8086 case ASHIFT:
8087 /* Shift by constant is multiply by power of two. */
8088 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8089 return 0;
8090
8091 return
8092 simplify_giv_expr (loop,
8093 gen_rtx_MULT (mode,
8094 XEXP (x, 0),
8095 GEN_INT ((HOST_WIDE_INT) 1
8096 << INTVAL (XEXP (x, 1)))),
8097 ext_val, benefit);
8098
8099 case NEG:
8100 /* "-a" is "a * (-1)" */
8101 return simplify_giv_expr (loop,
8102 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8103 ext_val, benefit);
8104
8105 case NOT:
8106 /* "~a" is "-a - 1". Silly, but easy. */
8107 return simplify_giv_expr (loop,
8108 gen_rtx_MINUS (mode,
8109 gen_rtx_NEG (mode, XEXP (x, 0)),
8110 const1_rtx),
8111 ext_val, benefit);
8112
8113 case USE:
8114 /* Already in proper form for invariant. */
8115 return x;
8116
8117 case SIGN_EXTEND:
8118 case ZERO_EXTEND:
8119 case TRUNCATE:
8120 /* Conditionally recognize extensions of simple IVs. After we've
8121 computed loop traversal counts and verified the range of the
8122 source IV, we'll reevaluate this as a GIV. */
8123 if (*ext_val == NULL_RTX)
8124 {
8125 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8126 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8127 {
8128 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8129 return arg0;
8130 }
8131 }
8132 goto do_default;
8133
8134 case REG:
8135 /* If this is a new register, we can't deal with it. */
8136 if (REGNO (x) >= max_reg_before_loop)
8137 return 0;
8138
8139 /* Check for biv or giv. */
8140 switch (REG_IV_TYPE (ivs, REGNO (x)))
8141 {
8142 case BASIC_INDUCT:
8143 return x;
8144 case GENERAL_INDUCT:
8145 {
8146 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8147
8148 /* Form expression from giv and add benefit. Ensure this giv
8149 can derive another and subtract any needed adjustment if so. */
8150
8151 /* Increasing the benefit here is risky. The only case in which it
8152 is arguably correct is if this is the only use of V. In other
8153 cases, this will artificially inflate the benefit of the current
8154 giv, and lead to suboptimal code. Thus, it is disabled, since
8155 potentially not reducing an only marginally beneficial giv is
8156 less harmful than reducing many givs that are not really
8157 beneficial. */
8158 {
8159 rtx single_use = regs->array[REGNO (x)].single_usage;
8160 if (single_use && single_use != const0_rtx)
8161 *benefit += v->benefit;
8162 }
8163
8164 if (v->cant_derive)
8165 return 0;
8166
8167 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8168 v->src_reg, v->mult_val),
8169 v->add_val);
8170
8171 if (v->derive_adjustment)
8172 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8173 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8174 if (*ext_val)
8175 {
8176 if (!v->ext_dependent)
8177 return arg0;
8178 }
8179 else
8180 {
8181 *ext_val = v->ext_dependent;
8182 return arg0;
8183 }
8184 return 0;
8185 }
8186
8187 default:
8188 do_default:
8189 /* If it isn't an induction variable, and it is invariant, we
8190 may be able to simplify things further by looking through
8191 the bits we just moved outside the loop. */
8192 if (loop_invariant_p (loop, x) == 1)
8193 {
8194 struct movable *m;
8195 struct loop_movables *movables = LOOP_MOVABLES (loop);
8196
8197 for (m = movables->head; m; m = m->next)
8198 if (rtx_equal_p (x, m->set_dest))
8199 {
8200 /* Ok, we found a match. Substitute and simplify. */
8201
8202 /* If we match another movable, we must use that, as
8203 this one is going away. */
8204 if (m->match)
8205 return simplify_giv_expr (loop, m->match->set_dest,
8206 ext_val, benefit);
8207
8208 /* If consec is nonzero, this is a member of a group of
8209 instructions that were moved together. We handle this
8210 case only to the point of seeking to the last insn and
8211 looking for a REG_EQUAL. Fail if we don't find one. */
8212 if (m->consec != 0)
8213 {
8214 int i = m->consec;
8215 tem = m->insn;
8216 do
8217 {
8218 tem = NEXT_INSN (tem);
8219 }
8220 while (--i > 0);
8221
8222 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8223 if (tem)
8224 tem = XEXP (tem, 0);
8225 }
8226 else
8227 {
8228 tem = single_set (m->insn);
8229 if (tem)
8230 tem = SET_SRC (tem);
8231 }
8232
8233 if (tem)
8234 {
8235 /* What we are most interested in is pointer
8236 arithmetic on invariants -- only take
8237 patterns we may be able to do something with. */
8238 if (GET_CODE (tem) == PLUS
8239 || GET_CODE (tem) == MULT
8240 || GET_CODE (tem) == ASHIFT
8241 || GET_CODE (tem) == CONST_INT
8242 || GET_CODE (tem) == SYMBOL_REF)
8243 {
8244 tem = simplify_giv_expr (loop, tem, ext_val,
8245 benefit);
8246 if (tem)
8247 return tem;
8248 }
8249 else if (GET_CODE (tem) == CONST
8250 && GET_CODE (XEXP (tem, 0)) == PLUS
8251 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8252 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8253 {
8254 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8255 ext_val, benefit);
8256 if (tem)
8257 return tem;
8258 }
8259 }
8260 break;
8261 }
8262 }
8263 break;
8264 }
8265
8266 /* Fall through to general case. */
8267 default:
8268 /* If invariant, return as USE (unless CONST_INT).
8269 Otherwise, not giv. */
8270 if (GET_CODE (x) == USE)
8271 x = XEXP (x, 0);
8272
8273 if (loop_invariant_p (loop, x) == 1)
8274 {
8275 if (GET_CODE (x) == CONST_INT)
8276 return x;
8277 if (GET_CODE (x) == CONST
8278 && GET_CODE (XEXP (x, 0)) == PLUS
8279 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8280 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8281 x = XEXP (x, 0);
8282 return gen_rtx_USE (mode, x);
8283 }
8284 else
8285 return 0;
8286 }
8287 }
8288
8289 /* This routine folds invariants such that there is only ever one
8290 CONST_INT in the summation. It is only used by simplify_giv_expr. */
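/* For example (made-up operands): folding (const_int 4) into
(plus (plus (reg A) (const_int 3)) (reg B)) yields
(plus (plus (reg A) (const_int 7)) (reg B)), so the summation still
contains a single CONST_INT.  */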
8291
8292 static rtx
8293 sge_plus_constant (rtx x, rtx c)
8294 {
8295 if (GET_CODE (x) == CONST_INT)
8296 return GEN_INT (INTVAL (x) + INTVAL (c));
8297 else if (GET_CODE (x) != PLUS)
8298 return gen_rtx_PLUS (GET_MODE (x), x, c);
8299 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8300 {
8301 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8302 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8303 }
8304 else if (GET_CODE (XEXP (x, 0)) == PLUS
8305 || GET_CODE (XEXP (x, 1)) != PLUS)
8306 {
8307 return gen_rtx_PLUS (GET_MODE (x),
8308 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8309 }
8310 else
8311 {
8312 return gen_rtx_PLUS (GET_MODE (x),
8313 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8314 }
8315 }
8316
8317 static rtx
8318 sge_plus (enum machine_mode mode, rtx x, rtx y)
8319 {
8320 while (GET_CODE (y) == PLUS)
8321 {
8322 rtx a = XEXP (y, 0);
8323 if (GET_CODE (a) == CONST_INT)
8324 x = sge_plus_constant (x, a);
8325 else
8326 x = gen_rtx_PLUS (mode, x, a);
8327 y = XEXP (y, 1);
8328 }
8329 if (GET_CODE (y) == CONST_INT)
8330 x = sge_plus_constant (x, y);
8331 else
8332 x = gen_rtx_PLUS (mode, x, y);
8333 return x;
8334 }
8335 \f
8336 /* Help detect a giv that is calculated by several consecutive insns;
8337 for example,
8338 giv = biv * M
8339 giv = giv + A
8340 The caller has already identified the first insn P as having a giv as dest;
8341 we check that all other insns that set the same register follow
8342 immediately after P, that they alter nothing else,
8343 and that the result of the last is still a giv.
8344
8345 The value is 0 if the reg set in P is not really a giv.
8346 Otherwise, the value is the amount gained by eliminating
8347 all the consecutive insns that compute the value.
8348
8349 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8350 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8351
8352 The coefficients of the ultimate giv value are stored in
8353 *MULT_VAL and *ADD_VAL. */
8354
8355 static int
8356 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8357 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8358 rtx *ext_val, rtx *last_consec_insn)
8359 {
8360 struct loop_ivs *ivs = LOOP_IVS (loop);
8361 struct loop_regs *regs = LOOP_REGS (loop);
8362 int count;
8363 enum rtx_code code;
8364 int benefit;
8365 rtx temp;
8366 rtx set;
8367
8368 /* Indicate that this is a giv so that we can update the value produced in
8369 each insn of the multi-insn sequence.
8370
8371 This induction structure will be used only by the call to
8372 general_induction_var below, so we can allocate it on our stack.
8373 If this is a giv, our caller will replace the induct var entry with
8374 a new induction structure. */
8375 struct induction *v;
8376
8377 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8378 return 0;
8379
8380 v = alloca (sizeof (struct induction));
8381 v->src_reg = src_reg;
8382 v->mult_val = *mult_val;
8383 v->add_val = *add_val;
8384 v->benefit = first_benefit;
8385 v->cant_derive = 0;
8386 v->derive_adjustment = 0;
8387 v->ext_dependent = NULL_RTX;
8388
8389 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8390 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8391
8392 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8393
8394 while (count > 0)
8395 {
8396 p = NEXT_INSN (p);
8397 code = GET_CODE (p);
8398
8399 /* If libcall, skip to end of call sequence. */
8400 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8401 p = XEXP (temp, 0);
8402
8403 if (code == INSN
8404 && (set = single_set (p))
8405 && REG_P (SET_DEST (set))
8406 && SET_DEST (set) == dest_reg
8407 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8408 add_val, mult_val, ext_val, 0,
8409 &benefit, VOIDmode)
8410 /* Giv created by equivalent expression. */
8411 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8412 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8413 add_val, mult_val, ext_val, 0,
8414 &benefit, VOIDmode)))
8415 && src_reg == v->src_reg)
8416 {
8417 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8418 benefit += libcall_benefit (p);
8419
8420 count--;
8421 v->mult_val = *mult_val;
8422 v->add_val = *add_val;
8423 v->benefit += benefit;
8424 }
8425 else if (code != NOTE)
8426 {
8427 /* Allow insns that set something other than this giv to a
8428 constant. Such insns are needed on machines which cannot
8429 include long constants and should not disqualify a giv. */
8430 if (code == INSN
8431 && (set = single_set (p))
8432 && SET_DEST (set) != dest_reg
8433 && CONSTANT_P (SET_SRC (set)))
8434 continue;
8435
8436 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8437 return 0;
8438 }
8439 }
8440
8441 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8442 *last_consec_insn = p;
8443 return v->benefit;
8444 }
8445 \f
8446 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8447 represented by G1. If no such expression can be found, or it is clear that
8448 it cannot possibly be a valid address, 0 is returned.
8449
8450 To perform the computation, we note that
8451 G1 = x * v + a and
8452 G2 = y * v + b
8453 where `v' is the biv.
8454
8455 So G2 = (y/x) * G1 + (b - a*y/x).
8456
8457 Note that MULT = y/x.
8458
8459 Update: A and B are now allowed to be additive expressions such that
8460 B contains all variables in A. That is, computing B-A will not require
8461 subtracting variables. */
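/* A worked example with made-up coefficients: if G1 = 2 * v + 8 and
G2 = 6 * v + 30 (so x = 2, a = 8, y = 6, b = 30), then MULT = y/x = 3
and G2 = 3 * G1 + (30 - 8 * 3) = 3 * G1 + 6.  */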
8462
8463 static rtx
8464 express_from_1 (rtx a, rtx b, rtx mult)
8465 {
8466 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8467
8468 if (mult == const0_rtx)
8469 return b;
8470
8471 /* If MULT is not 1, we cannot handle A with non-constants, since we
8472 would then be required to subtract multiples of the registers in A.
8473 This is theoretically possible, and may even apply to some Fortran
8474 constructs, but it is a lot of work and we do not attempt it here. */
8475
8476 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8477 return NULL_RTX;
8478
8479 /* In general these structures are sorted top to bottom (down the PLUS
8480 chain), but not left to right across the PLUS. If B is a higher
8481 order giv than A, we can strip one level and recurse. If A is higher
8482 order, we'll eventually bail out, but won't know that until the end.
8483 If they are the same, we'll strip one level around this loop. */
8484
8485 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8486 {
8487 rtx ra, rb, oa, ob, tmp;
8488
8489 ra = XEXP (a, 0), oa = XEXP (a, 1);
8490 if (GET_CODE (ra) == PLUS)
8491 tmp = ra, ra = oa, oa = tmp;
8492
8493 rb = XEXP (b, 0), ob = XEXP (b, 1);
8494 if (GET_CODE (rb) == PLUS)
8495 tmp = rb, rb = ob, ob = tmp;
8496
8497 if (rtx_equal_p (ra, rb))
8498 /* We matched: remove one reg completely. */
8499 a = oa, b = ob;
8500 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8501 /* An alternate match. */
8502 a = oa, b = rb;
8503 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8504 /* An alternate match. */
8505 a = ra, b = ob;
8506 else
8507 {
8508 /* Indicates an extra register in B. Strip one level from B and
8509 recurse, hoping B was the higher order expression. */
8510 ob = express_from_1 (a, ob, mult);
8511 if (ob == NULL_RTX)
8512 return NULL_RTX;
8513 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8514 }
8515 }
8516
8517 /* Here we are at the last level of A; go through the cases hoping to
8518 get rid of everything but a constant. */
8519
8520 if (GET_CODE (a) == PLUS)
8521 {
8522 rtx ra, oa;
8523
8524 ra = XEXP (a, 0), oa = XEXP (a, 1);
8525 if (rtx_equal_p (oa, b))
8526 oa = ra;
8527 else if (!rtx_equal_p (ra, b))
8528 return NULL_RTX;
8529
8530 if (GET_CODE (oa) != CONST_INT)
8531 return NULL_RTX;
8532
8533 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8534 }
8535 else if (GET_CODE (a) == CONST_INT)
8536 {
8537 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8538 }
8539 else if (CONSTANT_P (a))
8540 {
8541 enum machine_mode mode_a = GET_MODE (a);
8542 enum machine_mode mode_b = GET_MODE (b);
8543 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8544 return simplify_gen_binary (MINUS, mode, b, a);
8545 }
8546 else if (GET_CODE (b) == PLUS)
8547 {
8548 if (rtx_equal_p (a, XEXP (b, 0)))
8549 return XEXP (b, 1);
8550 else if (rtx_equal_p (a, XEXP (b, 1)))
8551 return XEXP (b, 0);
8552 else
8553 return NULL_RTX;
8554 }
8555 else if (rtx_equal_p (a, b))
8556 return const0_rtx;
8557
8558 return NULL_RTX;
8559 }
8560
8561 static rtx
8562 express_from (struct induction *g1, struct induction *g2)
8563 {
8564 rtx mult, add;
8565
8566 /* The value that G1 will be multiplied by must be a constant integer. Also,
8567 the only chance we have of getting a valid address is if y/x (see above
8568 for notation) is also an integer. */
8569 if (GET_CODE (g1->mult_val) == CONST_INT
8570 && GET_CODE (g2->mult_val) == CONST_INT)
8571 {
8572 if (g1->mult_val == const0_rtx
8573 || (g1->mult_val == constm1_rtx
8574 && INTVAL (g2->mult_val)
8575 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8576 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8577 return NULL_RTX;
8578 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8579 }
8580 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8581 mult = const1_rtx;
8582 else
8583 {
8584 /* ??? Find out if one is a multiple of the other? */
8585 return NULL_RTX;
8586 }
8587
8588 add = express_from_1 (g1->add_val, g2->add_val, mult);
8589 if (add == NULL_RTX)
8590 {
8591 /* Failed. If we've got a multiplication factor between G1 and G2,
8592 scale G1's addend and try again. */
8593 if (INTVAL (mult) > 1)
8594 {
8595 rtx g1_add_val = g1->add_val;
8596 if (GET_CODE (g1_add_val) == MULT
8597 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8598 {
8599 HOST_WIDE_INT m;
8600 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8601 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8602 XEXP (g1_add_val, 0), GEN_INT (m));
8603 }
8604 else
8605 {
8606 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8607 mult);
8608 }
8609
8610 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8611 }
8612 }
8613 if (add == NULL_RTX)
8614 return NULL_RTX;
8615
8616 /* Form simplified final result. */
8617 if (mult == const0_rtx)
8618 return add;
8619 else if (mult == const1_rtx)
8620 mult = g1->dest_reg;
8621 else
8622 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8623
8624 if (add == const0_rtx)
8625 return mult;
8626 else
8627 {
8628 if (GET_CODE (add) == PLUS
8629 && CONSTANT_P (XEXP (add, 1)))
8630 {
8631 rtx tem = XEXP (add, 1);
8632 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8633 add = tem;
8634 }
8635
8636 return gen_rtx_PLUS (g2->mode, mult, add);
8637 }
8638 }
8639 \f
8640 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8641 represented by G1. This indicates that G2 should be combined with G1 and
8642 that G2 can use (either directly or via an address expression) a register
8643 used to represent G1. */
8644
8645 static rtx
8646 combine_givs_p (struct induction *g1, struct induction *g2)
8647 {
8648 rtx comb, ret;
8649
8650 /* With the introduction of ext dependent givs, we must take care with modes.
8651 G2 must not use a wider mode than G1. */
8652 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8653 return NULL_RTX;
8654
8655 ret = comb = express_from (g1, g2);
8656 if (comb == NULL_RTX)
8657 return NULL_RTX;
8658 if (g1->mode != g2->mode)
8659 ret = gen_lowpart (g2->mode, comb);
8660
8661 /* If these givs are identical, they can be combined. We use the results
8662 of express_from because the addends are not in a canonical form, so
8663 rtx_equal_p is a weaker test. */
8664 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8665 combination to be the other way round. */
8666 if (comb == g1->dest_reg
8667 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8668 {
8669 return ret;
8670 }
8671
8672 /* If G2 can be expressed as a function of G1 and that function is valid
8673 as an address and no more expensive than using a register for G2,
8674 the expression of G2 in terms of G1 can be used. */
8675 if (ret != NULL_RTX
8676 && g2->giv_type == DEST_ADDR
8677 && memory_address_p (GET_MODE (g2->mem), ret))
8678 return ret;
8679
8680 return NULL_RTX;
8681 }
8682 \f
8683 /* See if BL is monotonic and has a constant per-iteration increment.
8684 Return the increment if so, otherwise return 0. */
8685
8686 static HOST_WIDE_INT
8687 get_monotonic_increment (struct iv_class *bl)
8688 {
8689 struct induction *v;
8690 rtx incr;
8691
8692 /* Get the total increment and check that it is constant. */
8693 incr = biv_total_increment (bl);
8694 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8695 return 0;
8696
8697 for (v = bl->biv; v != 0; v = v->next_iv)
8698 {
8699 if (GET_CODE (v->add_val) != CONST_INT)
8700 return 0;
8701
8702 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8703 return 0;
8704
8705 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8706 return 0;
8707 }
8708 return INTVAL (incr);
8709 }
8710
8711
8712 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8713 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8714 to which the biv belongs and INCR is its per-iteration increment. */
8715
8716 static bool
8717 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8718 HOST_WIDE_INT incr, enum machine_mode mode,
8719 unsigned HOST_WIDE_INT bias)
8720 {
8721 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8722
8723 /* We need to be able to manipulate MODE-size constants. */
8724 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8725 return false;
8726
8727 /* The number of loop iterations must be constant. */
8728 if (LOOP_INFO (loop)->n_iterations == 0)
8729 return false;
8730
8731 /* So must the biv's initial value. */
8732 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8733 return false;
8734
8735 initial = bias + INTVAL (bl->initial_value);
8736 maximum = GET_MODE_MASK (mode);
8737
8738 /* Make sure that the initial value is within range. */
8739 if (initial > maximum)
8740 return false;
8741
8742 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8743 (calculated to arbitrary precision) must be <= SPAN. */
8744 if (incr < 0)
8745 {
8746 delta = -incr;
8747 span = initial;
8748 }
8749 else
8750 {
8751 delta = incr;
8752 /* Handle the special case in which MAXIMUM is the largest
8753 unsigned HOST_WIDE_INT and INITIAL is 0. */
8754 if (maximum + 1 == initial)
8755 span = LOOP_INFO (loop)->n_iterations * delta;
8756 else
8757 span = maximum + 1 - initial;
8758 }
8759 return (span / LOOP_INFO (loop)->n_iterations >= delta);
8760 }
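/* A worked instance of the check above (made-up values): for an 8-bit
   MODE (MAXIMUM = 255), BIAS = 0, an initial value of 10 and INCR = +5,
   SPAN = 256 - 10 = 246. With 40 iterations, 246 / 40 = 6 >= 5, so the
   biased biv stays within the unsigned range of MODE; with 50 iterations,
   246 / 50 = 4 < 5 and we return false. */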
8761
8762
8763 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8764 the loop to which BL belongs and INCR is its per-iteration increment.
8765 UNSIGNEDP is true if the biv should be treated as unsigned. */
8766
8767 static bool
8768 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8769 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8770 {
8771 struct loop_info *loop_info;
8772 unsigned HOST_WIDE_INT bias;
8773
8774 /* A biv's value will always be limited to its natural mode.
8775 Larger modes will observe the same wrap-around. */
8776 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8777 mode = GET_MODE (bl->biv->src_reg);
8778
8779 loop_info = LOOP_INFO (loop);
8780
8781 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8782 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8783 return true;
8784
8785 if (mode == GET_MODE (bl->biv->src_reg)
8786 && bl->biv->src_reg == loop_info->iteration_var
8787 && loop_info->comparison_value
8788 && loop_invariant_p (loop, loop_info->comparison_value))
8789 {
8790 /* If the increment is +1, and the exit test is a <, the BIV
8791 cannot overflow. (For <=, we have the problematic case that
8792 the comparison value might be the maximum value of the range.) */
8793 if (incr == 1)
8794 {
8795 if (loop_info->comparison_code == LT)
8796 return true;
8797 if (loop_info->comparison_code == LTU && unsignedp)
8798 return true;
8799 }
8800
8801 /* Likewise for increment -1 and exit test >. */
8802 if (incr == -1)
8803 {
8804 if (loop_info->comparison_code == GT)
8805 return true;
8806 if (loop_info->comparison_code == GTU && unsignedp)
8807 return true;
8808 }
8809 }
8810 return false;
8811 }
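/* The BIAS used above folds the signed case into the unsigned check:
   for an 8-bit mode, BIAS = (255 >> 1) + 1 = 128, which maps the signed
   range [-128, 127] onto the unsigned range [0, 255]. For example
   (made-up value), a signed biv with initial value -100 is checked as
   the biased value 28 against the same unsigned bound. */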
8812
8813
8814 /* Return false iff it is provable that biv BL plus BIAS will not wrap
8815 at any point in its update sequence. Note that at the rtl level we
8816 may not have information about the signedness of BL; in that case,
8817 check for both signed and unsigned overflow. */
8818
8819 static bool
8820 biased_biv_may_wrap_p (const struct loop *loop, struct iv_class *bl,
8821 unsigned HOST_WIDE_INT bias)
8822 {
8823 HOST_WIDE_INT incr;
8824 bool check_signed, check_unsigned;
8825 enum machine_mode mode;
8826
8827 /* If the increment is not monotonic, we'd have to check separately
8828 at each increment step. Not Worth It. */
8829 incr = get_monotonic_increment (bl);
8830 if (incr == 0)
8831 return true;
8832
8833 /* If this biv is the loop iteration variable, then we may be able to
8834 deduce a sign based on the loop condition. */
8835 /* ??? This is not 100% reliable; consider an unsigned biv that is cast
8836 to signed for the comparison. However, this same bug appears all
8837 through loop.c. */
8838 check_signed = check_unsigned = true;
8839 if (bl->biv->src_reg == LOOP_INFO (loop)->iteration_var)
8840 {
8841 switch (LOOP_INFO (loop)->comparison_code)
8842 {
8843 case GTU: case GEU: case LTU: case LEU:
8844 check_signed = false;
8845 break;
8846 case GT: case GE: case LT: case LE:
8847 check_unsigned = false;
8848 break;
8849 default:
8850 break;
8851 }
8852 }
8853
8854 mode = GET_MODE (bl->biv->src_reg);
8855
8856 if (check_unsigned
8857 && !biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8858 return true;
8859
8860 if (check_signed)
8861 {
8862 bias += (GET_MODE_MASK (mode) >> 1) + 1;
8863 if (!biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8864 return true;
8865 }
8866
8867 return false;
8868 }
8869
8870
8871 /* Given that X is an extension or truncation of BL, return true
8872 if it is unaffected by overflow. LOOP is the loop to which
8873 BL belongs and INCR is its per-iteration increment. */
8874
8875 static bool
8876 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8877 HOST_WIDE_INT incr, rtx x)
8878 {
8879 enum machine_mode mode;
8880 bool signedp, unsignedp;
8881
8882 switch (GET_CODE (x))
8883 {
8884 case SIGN_EXTEND:
8885 case ZERO_EXTEND:
8886 mode = GET_MODE (XEXP (x, 0));
8887 signedp = (GET_CODE (x) == SIGN_EXTEND);
8888 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8889 break;
8890
8891 case TRUNCATE:
8892 /* We don't know whether this value is being used as signed
8893 or unsigned, so check the conditions for both. */
8894 mode = GET_MODE (x);
8895 signedp = unsignedp = true;
8896 break;
8897
8898 default:
8899 gcc_unreachable ();
8900 }
8901
8902 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8903 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8904 }
8905
8906
8907 /* Check each extension dependent giv in this class to see if its
8908 root biv is safe from wrapping in the interior mode, which would
8909 make the giv illegal. */
8910
8911 static void
8912 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8913 {
8914 struct induction *v;
8915 HOST_WIDE_INT incr;
8916
8917 incr = get_monotonic_increment (bl);
8918
8919 /* Invalidate givs that fail the tests. */
8920 for (v = bl->giv; v; v = v->next_iv)
8921 if (v->ext_dependent)
8922 {
8923 if (incr != 0
8924 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8925 {
8926 if (loop_dump_stream)
8927 fprintf (loop_dump_stream,
8928 "Verified ext dependent giv at %d of reg %d\n",
8929 INSN_UID (v->insn), bl->regno);
8930 }
8931 else
8932 {
8933 if (loop_dump_stream)
8934 fprintf (loop_dump_stream,
8935 "Failed ext dependent giv at %d\n",
8936 INSN_UID (v->insn));
8937
8938 v->ignore = 1;
8939 bl->all_reduced = 0;
8940 }
8941 }
8942 }
8943
8944 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8945
8946 static rtx
8947 extend_value_for_giv (struct induction *v, rtx value)
8948 {
8949 rtx ext_dep = v->ext_dependent;
8950
8951 if (! ext_dep)
8952 return value;
8953
8954 /* Recall that check_ext_dependent_givs verified that the known bounds
8955 of a biv did not overflow or wrap with respect to the extension for
8956 the giv. Therefore, constants need no additional adjustment. */
8957 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8958 return value;
8959
8960 /* Otherwise, we must adjust the value to compensate for the
8961 differing modes of the biv and the giv. */
8962 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8963 }
8964 \f
8965 struct combine_givs_stats
8966 {
8967 int giv_number;
8968 int total_benefit;
8969 };
8970
8971 static int
8972 cmp_combine_givs_stats (const void *xp, const void *yp)
8973 {
8974 const struct combine_givs_stats * const x =
8975 (const struct combine_givs_stats *) xp;
8976 const struct combine_givs_stats * const y =
8977 (const struct combine_givs_stats *) yp;
8978 int d;
8979 d = y->total_benefit - x->total_benefit;
8980 /* Stabilize the sort. */
8981 if (!d)
8982 d = x->giv_number - y->giv_number;
8983 return d;
8984 }
8985
8986 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8987 any other. If so, point SAME to the giv combined with and set NEW_REG to
8988 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8989 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
8990
8991 static void
8992 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8993 {
8994 /* Additional benefit to add for being combined multiple times. */
8995 const int extra_benefit = 3;
8996
8997 struct induction *g1, *g2, **giv_array;
8998 int i, j, k, giv_count;
8999 struct combine_givs_stats *stats;
9000 rtx *can_combine;
9001
9002 /* Count givs, because bl->giv_count is incorrect here. */
9003 giv_count = 0;
9004 for (g1 = bl->giv; g1; g1 = g1->next_iv)
9005 if (!g1->ignore)
9006 giv_count++;
9007
9008 giv_array = alloca (giv_count * sizeof (struct induction *));
9009 i = 0;
9010 for (g1 = bl->giv; g1; g1 = g1->next_iv)
9011 if (!g1->ignore)
9012 giv_array[i++] = g1;
9013
9014 stats = xcalloc (giv_count, sizeof (*stats));
9015 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
9016
9017 for (i = 0; i < giv_count; i++)
9018 {
9019 int this_benefit;
9020 rtx single_use;
9021
9022 g1 = giv_array[i];
9023 stats[i].giv_number = i;
9024
9025 /* If a DEST_REG GIV is used only once, do not allow it to combine
9026 with anything, for in doing so we will gain nothing that cannot
9027 be had by simply letting the GIV with which we would have combined
9028 be reduced on its own. The lossage shows up in particular with
9029 DEST_ADDR targets on hosts with reg+reg addressing, though it can
9030 be seen elsewhere as well. */
9031 if (g1->giv_type == DEST_REG
9032 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
9033 && single_use != const0_rtx)
9034 continue;
9035
9036 this_benefit = g1->benefit;
9037 /* Add an additional weight for zero addends. */
9038 if (g1->no_const_addval)
9039 this_benefit += 1;
9040
9041 for (j = 0; j < giv_count; j++)
9042 {
9043 rtx this_combine;
9044
9045 g2 = giv_array[j];
9046 if (g1 != g2
9047 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
9048 {
9049 can_combine[i * giv_count + j] = this_combine;
9050 this_benefit += g2->benefit + extra_benefit;
9051 }
9052 }
9053 stats[i].total_benefit = this_benefit;
9054 }
9055
9056 /* Iterate, combining until we can't. */
9057 restart:
9058 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
9059
9060 if (loop_dump_stream)
9061 {
9062 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
9063 for (k = 0; k < giv_count; k++)
9064 {
9065 g1 = giv_array[stats[k].giv_number];
9066 if (!g1->combined_with && !g1->same)
9067 fprintf (loop_dump_stream, " {%d, %d}",
9068 INSN_UID (giv_array[stats[k].giv_number]->insn),
9069 stats[k].total_benefit);
9070 }
9071 putc ('\n', loop_dump_stream);
9072 }
9073
9074 for (k = 0; k < giv_count; k++)
9075 {
9076 int g1_add_benefit = 0;
9077
9078 i = stats[k].giv_number;
9079 g1 = giv_array[i];
9080
9081 /* If it has already been combined, skip. */
9082 if (g1->combined_with || g1->same)
9083 continue;
9084
9085 for (j = 0; j < giv_count; j++)
9086 {
9087 g2 = giv_array[j];
9088 if (g1 != g2 && can_combine[i * giv_count + j]
9089 /* If it has already been combined, skip. */
9090 && ! g2->same && ! g2->combined_with)
9091 {
9092 int l;
9093
9094 g2->new_reg = can_combine[i * giv_count + j];
9095 g2->same = g1;
9096 /* For a DEST_ADDR giv, the address may now be an expression rather
9097 than a single register. This changes the costs considerably, so
9098 add the compensation. */
9099 if (g2->giv_type == DEST_ADDR)
9100 g2->benefit = (g2->benefit + reg_address_cost
9101 - address_cost (g2->new_reg,
9102 GET_MODE (g2->mem)));
9103 g1->combined_with++;
9104 g1->lifetime += g2->lifetime;
9105
9106 g1_add_benefit += g2->benefit;
9107
9108 /* ??? The new final_[bg]iv_value code does a much better job
9109 of finding replaceable giv's, and hence this code may no
9110 longer be necessary. */
9111 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9112 g1_add_benefit -= copy_cost;
9113
9114 /* To help optimize the next set of combinations, remove
9115 this giv from the benefits of other potential mates. */
9116 for (l = 0; l < giv_count; ++l)
9117 {
9118 int m = stats[l].giv_number;
9119 if (can_combine[m * giv_count + j])
9120 stats[l].total_benefit -= g2->benefit + extra_benefit;
9121 }
9122
9123 if (loop_dump_stream)
9124 fprintf (loop_dump_stream,
9125 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9126 INSN_UID (g2->insn), INSN_UID (g1->insn),
9127 g1->benefit, g1_add_benefit, g1->lifetime);
9128 }
9129 }
9130
9131 /* To help optimize the next set of combinations, remove
9132 this giv from the benefits of other potential mates. */
9133 if (g1->combined_with)
9134 {
9135 for (j = 0; j < giv_count; ++j)
9136 {
9137 int m = stats[j].giv_number;
9138 if (can_combine[m * giv_count + i])
9139 stats[j].total_benefit -= g1->benefit + extra_benefit;
9140 }
9141
9142 g1->benefit += g1_add_benefit;
9143
9144 /* We've finished with this giv, and everything it touched.
9145 Restart the combination so that proper weights for the
9146 rest of the givs are properly taken into account. */
9147 /* ??? Ideally we would compact the arrays at this point, so
9148 as to not cover old ground. But sanely compacting
9149 can_combine is tricky. */
9150 goto restart;
9151 }
9152 }
9153
9154 /* Clean up. */
9155 free (stats);
9156 free (can_combine);
9157 }
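/* A small worked example of the benefit bookkeeping above (made-up
   numbers): if G1 has benefit 4 and can absorb G2 (benefit 6) and G3
   (benefit 5), its provisional total_benefit is
   4 + (6 + 3) + (5 + 3) = 21, where 3 is extra_benefit. Assuming 21 is
   the largest total, the sort considers G1 first, G2 and G3 get
   SAME = G1, and their benefits (less copy_cost for non-replaceable
   user-variable givs) are folded into G1->benefit before the
   combination restarts. */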
9158 \f
9159 /* Generate sequence for REG = B * M + A. B is the initial value of
9160 the basic induction variable, M a multiplicative constant, A an
9161 additive constant and REG the destination register. */
9162
9163 static rtx
9164 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9165 {
9166 rtx seq;
9167 rtx result;
9168
9169 start_sequence ();
9170 /* Use unsigned arithmetic. */
9171 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9172 if (reg != result)
9173 emit_move_insn (reg, result);
9174 seq = get_insns ();
9175 end_sequence ();
9176
9177 return seq;
9178 }
9179
9180
9181 /* Update registers created in insn sequence SEQ. */
9182
9183 static void
9184 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9185 {
9186 rtx insn;
9187
9188 /* Update register info for alias analysis. */
9189
9190 insn = seq;
9191 while (insn != NULL_RTX)
9192 {
9193 rtx set = single_set (insn);
9194
9195 if (set && REG_P (SET_DEST (set)))
9196 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9197
9198 insn = NEXT_INSN (insn);
9199 }
9200 }
9201
9202
9203 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9204 is the initial value of the basic induction variable, M a
9205 multiplicative constant, A an additive constant and REG the
9206 destination register. */
9207
9208 static void
9209 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9210 rtx reg, basic_block before_bb, rtx before_insn)
9211 {
9212 rtx seq;
9213
9214 if (! before_insn)
9215 {
9216 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9217 return;
9218 }
9219
9220 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9221 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9222
9223 /* Increase the lifetime of any invariants moved further in code. */
9224 update_reg_last_use (a, before_insn);
9225 update_reg_last_use (b, before_insn);
9226 update_reg_last_use (m, before_insn);
9227
9228 /* It is possible that the expansion created lots of new registers.
9229 Iterate over the sequence we just created and record them all. We
9230 must do this before inserting the sequence. */
9231 loop_regs_update (loop, seq);
9232
9233 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9234 }
9235
9236
9237 /* Emit insns after the loop (at the loop sink) to set REG = B * M + A. B is the
9238 initial value of the basic induction variable, M a multiplicative
9239 constant, A an additive constant and REG the destination
9240 register. */
9241
9242 static void
9243 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9244 {
9245 rtx seq;
9246
9247 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9248 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9249
9250 /* Increase the lifetime of any invariants moved further in code.
9251 ???? Is this really necessary? */
9252 update_reg_last_use (a, loop->sink);
9253 update_reg_last_use (b, loop->sink);
9254 update_reg_last_use (m, loop->sink);
9255
9256 /* It is possible that the expansion created lots of new registers.
9257 Iterate over the sequence we just created and record them all. We
9258 must do this before inserting the sequence. */
9259 loop_regs_update (loop, seq);
9260
9261 loop_insn_sink (loop, seq);
9262 }
9263
9264
9265 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the initial
9266 value of the basic induction variable, M a multiplicative constant,
9267 A an additive constant and REG the destination register. */
9268
9269 static void
9270 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9271 {
9272 rtx seq;
9273
9274 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9275 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9276
9277 /* It is possible that the expansion created lots of new registers.
9278 Iterate over the sequence we just created and record them all. We
9279 must do this before inserting the sequence. */
9280 loop_regs_update (loop, seq);
9281
9282 loop_insn_hoist (loop, seq);
9283 }
9284
9285
9286
9287 /* Similar to gen_add_mult, but compute cost rather than generating
9288 sequence. */
9289
9290 static int
9291 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9292 {
9293 int cost = 0;
9294 rtx last, result;
9295
9296 start_sequence ();
9297 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9298 if (reg != result)
9299 emit_move_insn (reg, result);
9300 last = get_last_insn ();
9301 while (last)
9302 {
9303 rtx t = single_set (last);
9304 if (t)
9305 cost += rtx_cost (SET_SRC (t), SET);
9306 last = PREV_INSN (last);
9307 }
9308 end_sequence ();
9309 return cost;
9310 }
9311 \f
9312 /* Test whether A * B can be computed without
9313 an actual multiply insn. Value is 1 if so.
9314
9315 ??? This function stinks because it generates a ton of wasted RTL
9316 ??? and as a result fragments GC memory to no end. There are other
9317 ??? places in the compiler which are invoked a lot and do the same
9318 ??? thing, generate wasted RTL just to see if something is possible. */
9319
9320 static int
9321 product_cheap_p (rtx a, rtx b)
9322 {
9323 rtx tmp;
9324 int win, n_insns;
9325
9326 /* If only one is constant, make it B. */
9327 if (GET_CODE (a) == CONST_INT)
9328 tmp = a, a = b, b = tmp;
9329
9330 /* If the first is constant, both are constant, so we don't need a multiply. */
9331 if (GET_CODE (a) == CONST_INT)
9332 return 1;
9333
9334 /* If the second is not constant, neither is constant, so we would need a multiply. */
9335 if (GET_CODE (b) != CONST_INT)
9336 return 0;
9337
9338 /* One operand is constant, so might not need multiply insn. Generate the
9339 code for the multiply and see if a call or multiply, or long sequence
9340 of insns is generated. */
9341
9342 start_sequence ();
9343 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9344 tmp = get_insns ();
9345 end_sequence ();
9346
9347 win = 1;
9348 if (tmp == NULL_RTX)
9349 ;
9350 else if (INSN_P (tmp))
9351 {
9352 n_insns = 0;
9353 while (tmp != NULL_RTX)
9354 {
9355 rtx next = NEXT_INSN (tmp);
9356
9357 if (++n_insns > 3
9358 || !NONJUMP_INSN_P (tmp)
9359 || (GET_CODE (PATTERN (tmp)) == SET
9360 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9361 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9362 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9363 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9364 {
9365 win = 0;
9366 break;
9367 }
9368
9369 tmp = next;
9370 }
9371 }
9372 else if (GET_CODE (tmp) == SET
9373 && GET_CODE (SET_SRC (tmp)) == MULT)
9374 win = 0;
9375 else if (GET_CODE (tmp) == PARALLEL
9376 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9377 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9378 win = 0;
9379
9380 return win;
9381 }
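/* For instance, on a typical target a multiplication by 5 is synthesized
   as a shift and an add (two cheap insns), so product_cheap_p returns 1,
   whereas a constant with no short shift-and-add sequence falls back to
   a real mult insn (or a libcall) and the function returns 0. The exact
   outcome depends on the target's expand_mult synthesis. */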
9382 \f
9383 /* Check to see if loop can be terminated by a "decrement and branch until
9384 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
9385 Also try reversing an increment loop to a decrement loop
9386 to see if the optimization can be performed.
9387 Value is nonzero if optimization was performed. */
9388
9389 /* This is useful even if the architecture doesn't have such an insn,
9390 because it might change a loop which increments from 0 to n to a loop
9391 which decrements from n to 0. A loop that decrements to zero is usually
9392 faster than one that increments from zero. */
9393
9394 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9395 such as approx_final_value, biv_total_increment, loop_iterations, and
9396 final_[bg]iv_value. */
9397
9398 static int
9399 check_dbra_loop (struct loop *loop, int insn_count)
9400 {
9401 struct loop_info *loop_info = LOOP_INFO (loop);
9402 struct loop_regs *regs = LOOP_REGS (loop);
9403 struct loop_ivs *ivs = LOOP_IVS (loop);
9404 struct iv_class *bl;
9405 rtx reg;
9406 enum machine_mode mode;
9407 rtx jump_label;
9408 rtx final_value;
9409 rtx start_value;
9410 rtx new_add_val;
9411 rtx comparison;
9412 rtx before_comparison;
9413 rtx p;
9414 rtx jump;
9415 rtx first_compare;
9416 int compare_and_branch;
9417 rtx loop_start = loop->start;
9418 rtx loop_end = loop->end;
9419
9420 /* If last insn is a conditional branch, and the insn before tests a
9421 register value, try to optimize it. Otherwise, we can't do anything. */
9422
9423 jump = PREV_INSN (loop_end);
9424 comparison = get_condition_for_loop (loop, jump);
9425 if (comparison == 0)
9426 return 0;
9427 if (!onlyjump_p (jump))
9428 return 0;
9429
9430 /* Try to compute whether the compare/branch at the loop end is one or
9431 two instructions. */
9432 get_condition (jump, &first_compare, false, true);
9433 if (first_compare == jump)
9434 compare_and_branch = 1;
9435 else if (first_compare == prev_nonnote_insn (jump))
9436 compare_and_branch = 2;
9437 else
9438 return 0;
9439
9440 {
9441 /* If more than one condition is present to control the loop, then
9442 do not proceed, as this function does not know how to rewrite
9443 loop tests with more than one condition.
9444
9445 Look backwards from the first insn in the last comparison
9446 sequence and see if we've got another comparison sequence. */
9447
9448 rtx jump1;
9449 if ((jump1 = prev_nonnote_insn (first_compare))
9450 && JUMP_P (jump1))
9451 return 0;
9452 }
9453
9454 /* Check all of the bivs to see if the compare uses one of them.
9455 Skip biv's set more than once because we can't guarantee that
9456 it will be zero on the last iteration. Also skip if the biv is
9457 used between its update and the test insn. */
9458
9459 for (bl = ivs->list; bl; bl = bl->next)
9460 {
9461 if (bl->biv_count == 1
9462 && ! bl->biv->maybe_multiple
9463 && bl->biv->dest_reg == XEXP (comparison, 0)
9464 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9465 first_compare))
9466 break;
9467 }
9468
9469 /* Try swapping the comparison to identify a suitable biv. */
9470 if (!bl)
9471 for (bl = ivs->list; bl; bl = bl->next)
9472 if (bl->biv_count == 1
9473 && ! bl->biv->maybe_multiple
9474 && bl->biv->dest_reg == XEXP (comparison, 1)
9475 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9476 first_compare))
9477 {
9478 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9479 VOIDmode,
9480 XEXP (comparison, 1),
9481 XEXP (comparison, 0));
9482 break;
9483 }
9484
9485 if (! bl)
9486 return 0;
9487
9488 /* Look for the case where the basic induction variable is always
9489 nonnegative, and equals zero on the last iteration.
9490 In this case, add a reg_note REG_NONNEG, which allows the
9491 m68k DBRA instruction to be used. */
9492
9493 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9494 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9495 && GET_CODE (bl->biv->add_val) == CONST_INT
9496 && INTVAL (bl->biv->add_val) < 0)
9497 {
9498 /* The initial value must be greater than 0, and
9499 init_val % -dec_value == 0 to ensure that the biv equals zero on
9500 the last iteration. */
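/* For example (made-up values): with add_val = -4 and initial value 12,
   the biv takes the values 12, 8, 4, 0; it is never negative and is
   exactly zero when the loop exits, so the REG_NONNEG note is safe.
   With initial value 10 it would step 10, 6, 2, -2, skipping zero, and
   the modulus test below rejects it. */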
9501
9502 if (GET_CODE (bl->initial_value) == CONST_INT
9503 && INTVAL (bl->initial_value) > 0
9504 && (INTVAL (bl->initial_value)
9505 % (-INTVAL (bl->biv->add_val))) == 0)
9506 {
9507 /* Register always nonnegative, add REG_NOTE to branch. */
9508 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9509 REG_NOTES (jump)
9510 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9511 REG_NOTES (jump));
9512 bl->nonneg = 1;
9513
9514 return 1;
9515 }
9516
9517 /* If the decrement is 1 and the value was tested as >= 0 before
9518 the loop, then we can safely optimize. */
9519 for (p = loop_start; p; p = PREV_INSN (p))
9520 {
9521 if (LABEL_P (p))
9522 break;
9523 if (!JUMP_P (p))
9524 continue;
9525
9526 before_comparison = get_condition_for_loop (loop, p);
9527 if (before_comparison
9528 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9529 && (GET_CODE (before_comparison) == LT
9530 || GET_CODE (before_comparison) == LTU)
9531 && XEXP (before_comparison, 1) == const0_rtx
9532 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9533 && INTVAL (bl->biv->add_val) == -1)
9534 {
9535 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9536 REG_NOTES (jump)
9537 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9538 REG_NOTES (jump));
9539 bl->nonneg = 1;
9540
9541 return 1;
9542 }
9543 }
9544 }
9545 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9546 && INTVAL (bl->biv->add_val) > 0)
9547 {
9548 /* Try to change inc to dec, so can apply above optimization. */
9549 /* Can do this if:
9550 all registers modified are induction variables or invariant,
9551 all memory references have non-overlapping addresses
9552 (obviously true if only one write), and
9553 we allow 2 insns for the compare/jump at the end of the loop. */
9554 /* Also, we must avoid any instructions which use both the reversed
9555 biv and another biv. Such instructions will fail if the loop is
9556 reversed. We meet this condition by requiring that either
9557 no_use_except_counting is true, or else that there is only
9558 one biv. */
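/* As a source-level sketch of the reversal performed below (purely for
   illustration; the actual transformation is done on the rtl biv), a
   counting loop such as

       for (i = 0; i < 10; i++)
         body ();

   in which `i' is used for nothing but counting becomes, in effect,

       for (i = 9; i >= 0; i--)
         body ();

   so that the exit test compares against zero and a decrement-and-branch
   instruction can be used. */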
9559 int num_nonfixed_reads = 0;
9560 /* 1 if the iteration var is used only to count iterations. */
9561 int no_use_except_counting = 0;
9562 /* 1 if the loop has no memory store, or it has a single memory store
9563 which is reversible. */
9564 int reversible_mem_store = 1;
9565
9566 if (bl->giv_count == 0
9567 && !loop->exit_count
9568 && !loop_info->has_multiple_exit_targets)
9569 {
9570 rtx bivreg = regno_reg_rtx[bl->regno];
9571 struct iv_class *blt;
9572
9573 /* If there are no givs for this biv, and the only exit is the
9574 fall through at the end of the loop, then
9575 see if perhaps there are no uses except to count. */
9576 no_use_except_counting = 1;
9577 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9578 if (INSN_P (p))
9579 {
9580 rtx set = single_set (p);
9581
9582 if (set && REG_P (SET_DEST (set))
9583 && REGNO (SET_DEST (set)) == bl->regno)
9584 /* An insn that sets the biv is okay. */
9585 ;
9586 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9587 /* An insn that doesn't mention the biv is okay. */
9588 ;
9589 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9590 || p == prev_nonnote_insn (loop_end))
9591 {
9592 /* If either of these insns uses the biv and sets a pseudo
9593 that has more than one usage, then the biv has uses
9594 other than counting since it's used to derive a value
9595 that is used more than one time. */
9596 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9597 regs);
9598 if (regs->multiple_uses)
9599 {
9600 no_use_except_counting = 0;
9601 break;
9602 }
9603 }
9604 else
9605 {
9606 no_use_except_counting = 0;
9607 break;
9608 }
9609 }
9610
9611 /* A biv has uses besides counting if it is used to set
9612 another biv. */
9613 for (blt = ivs->list; blt; blt = blt->next)
9614 if (blt->init_set
9615 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9616 {
9617 no_use_except_counting = 0;
9618 break;
9619 }
9620 }
9621
9622 if (no_use_except_counting)
9623 /* No need to worry about MEMs. */
9624 ;
9625 else if (loop_info->num_mem_sets <= 1)
9626 {
9627 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9628 if (INSN_P (p))
9629 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9630
9631 /* If the loop has a single store, and the destination address is
9632 invariant, then we can't reverse the loop, because this address
9633 might then have the wrong value at loop exit.
9634 This would work if the source was invariant also, however, in that
9635 case, the insn should have been moved out of the loop. */
9636
9637 if (loop_info->num_mem_sets == 1)
9638 {
9639 struct induction *v;
9640
9641 /* If we could prove that each of the memory locations
9642 written to was different, then we could reverse the
9643 store -- but we don't presently have any way of
9644 knowing that. */
9645 reversible_mem_store = 0;
9646
9647 /* If the store depends on a register that is set after the
9648 store, it depends on the initial value, and is thus not
9649 reversible. */
9650 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9651 {
9652 if (v->giv_type == DEST_REG
9653 && reg_mentioned_p (v->dest_reg,
9654 PATTERN (loop_info->first_loop_store_insn))
9655 && loop_insn_first_p (loop_info->first_loop_store_insn,
9656 v->insn))
9657 reversible_mem_store = 0;
9658 }
9659 }
9660 }
9661 else
9662 return 0;
9663
9664 /* This code only acts for innermost loops. Also it simplifies
9665 the memory address check by only reversing loops with
9666 zero or one memory access.
9667 Two memory accesses could involve parts of the same array,
9668 and that can't be reversed.
9669 If the biv is used only for counting, then we don't need to worry
9670 about all these things. */
9671
9672 if ((num_nonfixed_reads <= 1
9673 && ! loop_info->has_nonconst_call
9674 && ! loop_info->has_prefetch
9675 && ! loop_info->has_volatile
9676 && reversible_mem_store
9677 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9678 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9679 && (bl == ivs->list && bl->next == 0))
9680 || (no_use_except_counting && ! loop_info->has_prefetch))
9681 {
9682 rtx tem;
9683
9684 /* Loop can be reversed. */
9685 if (loop_dump_stream)
9686 fprintf (loop_dump_stream, "Can reverse loop\n");
9687
9688 /* Now check other conditions:
9689
9690 The increment must be a constant, as must the initial value,
9691 and the comparison code must be LT.
9692
9693 This test can probably be improved since +/- 1 in the constant
9694 can be obtained by changing LT to LE and vice versa; this is
9695 confusing. */
9696
9697 if (comparison
9698 /* for constants, LE gets turned into LT */
9699 && (GET_CODE (comparison) == LT
9700 || (GET_CODE (comparison) == LE
9701 && no_use_except_counting)
9702 || GET_CODE (comparison) == LTU))
9703 {
9704 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9705 rtx initial_value, comparison_value;
9706 int nonneg = 0;
9707 enum rtx_code cmp_code;
9708 int comparison_const_width;
9709 unsigned HOST_WIDE_INT comparison_sign_mask;
9710 bool keep_first_compare;
9711
9712 add_val = INTVAL (bl->biv->add_val);
9713 comparison_value = XEXP (comparison, 1);
9714 if (GET_MODE (comparison_value) == VOIDmode)
9715 comparison_const_width
9716 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9717 else
9718 comparison_const_width
9719 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9720 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9721 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9722 comparison_sign_mask
9723 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9724
9725 /* If the comparison value is not a loop invariant, then we
9726 can not reverse this loop.
9727
9728 ??? If the insns which initialize the comparison value as
9729 a whole compute an invariant result, then we could move
9730 them out of the loop and proceed with loop reversal. */
9731 if (! loop_invariant_p (loop, comparison_value))
9732 return 0;
9733
9734 if (GET_CODE (comparison_value) == CONST_INT)
9735 comparison_val = INTVAL (comparison_value);
9736 initial_value = bl->initial_value;
9737
9738 /* Normalize the initial value if it is an integer and
9739 has no other use except as a counter. This will allow
9740 a few more loops to be reversed. */
9741 if (no_use_except_counting
9742 && GET_CODE (comparison_value) == CONST_INT
9743 && GET_CODE (initial_value) == CONST_INT)
9744 {
9745 comparison_val = comparison_val - INTVAL (bl->initial_value);
9746 /* The code below requires comparison_val to be a multiple
9747 of add_val in order to do the loop reversal, so
9748 round up comparison_val to a multiple of add_val.
9749 Since comparison_value is constant, we know that the
9750 current comparison code is LT. */
9751 comparison_val = comparison_val + add_val - 1;
9752 comparison_val
9753 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9754 /* We postpone overflow checks for COMPARISON_VAL here;
9755 even if there is an overflow, we might still be able to
9756 reverse the loop, if converting the loop exit test to
9757 NE is possible. */
9758 initial_value = const0_rtx;
9759 }
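/* Worked example of this normalization (made-up values): initial value
   3, comparison value 10, add_val 4. Then comparison_val = 10 - 3 = 7,
   which is rounded up to the next multiple of add_val:
   7 + 4 - 1 = 10, then 10 - (10 % 4) = 8. The normalized loop runs its
   biv through 0, 4 against the limit 8, i.e. two iterations, exactly
   like the original 3, 7 against the limit 10. */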
9760
9761 /* First check if we can do a vanilla loop reversal. */
9762 if (initial_value == const0_rtx
9763 && GET_CODE (comparison_value) == CONST_INT
9764 /* Now do postponed overflow checks on COMPARISON_VAL. */
9765 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9766 & comparison_sign_mask))
9767 {
9768 /* Register will always be nonnegative, with value
9769 0 on last iteration */
9770 add_adjust = add_val;
9771 nonneg = 1;
9772 cmp_code = GE;
9773 }
9774 else
9775 return 0;
9776
9777 if (GET_CODE (comparison) == LE)
9778 add_adjust -= add_val;
9779
9780 /* If the initial value is not zero, or if the comparison
9781 value is not an exact multiple of the increment, then we
9782 can not reverse this loop. */
9783 if (initial_value == const0_rtx
9784 && GET_CODE (comparison_value) == CONST_INT)
9785 {
9786 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9787 return 0;
9788 }
9789 else
9790 {
9791 if (! no_use_except_counting || add_val != 1)
9792 return 0;
9793 }
9794
9795 final_value = comparison_value;
9796
9797 /* Reset these in case we normalized the initial value
9798 and comparison value above. */
9799 if (GET_CODE (comparison_value) == CONST_INT
9800 && GET_CODE (initial_value) == CONST_INT)
9801 {
9802 comparison_value = GEN_INT (comparison_val);
9803 final_value
9804 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9805 }
9806 bl->initial_value = initial_value;
9807
9808 /* Save some info needed to produce the new insns. */
9809 reg = bl->biv->dest_reg;
9810 mode = GET_MODE (reg);
9811 jump_label = condjump_label (PREV_INSN (loop_end));
9812 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9813
9814 /* Set start_value; if this is not a CONST_INT, we need
9815 to generate a SUB.
9816 Initialize biv to start_value before loop start.
9817 The old initializing insn will be deleted as a
9818 dead store by flow.c. */
9819 if (initial_value == const0_rtx
9820 && GET_CODE (comparison_value) == CONST_INT)
9821 {
9822 start_value
9823 = gen_int_mode (comparison_val - add_adjust, mode);
9824 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9825 }
9826 else if (GET_CODE (initial_value) == CONST_INT)
9827 {
9828 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9829 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9830
9831 if (add_insn == 0)
9832 return 0;
9833
9834 start_value
9835 = gen_rtx_PLUS (mode, comparison_value, offset);
9836 loop_insn_hoist (loop, add_insn);
9837 if (GET_CODE (comparison) == LE)
9838 final_value = gen_rtx_PLUS (mode, comparison_value,
9839 GEN_INT (add_val));
9840 }
9841 else if (! add_adjust)
9842 {
9843 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9844 initial_value);
9845
9846 if (sub_insn == 0)
9847 return 0;
9848 start_value
9849 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9850 loop_insn_hoist (loop, sub_insn);
9851 }
9852 else
9853 /* We could handle the other cases too, but it'll be
9854 better to have a testcase first. */
9855 return 0;
9856
9857 /* We may not have a single insn which can increment a reg, so
9858 create a sequence to hold all the insns from expand_inc. */
9859 start_sequence ();
9860 expand_inc (reg, new_add_val);
9861 tem = get_insns ();
9862 end_sequence ();
9863
9864 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9865 delete_insn (bl->biv->insn);
9866
9867 /* Update biv info to reflect its new status. */
9868 bl->biv->insn = p;
9869 bl->initial_value = start_value;
9870 bl->biv->add_val = new_add_val;
9871
9872 /* Update loop info. */
9873 loop_info->initial_value = reg;
9874 loop_info->initial_equiv_value = reg;
9875 loop_info->final_value = const0_rtx;
9876 loop_info->final_equiv_value = const0_rtx;
9877 loop_info->comparison_value = const0_rtx;
9878 loop_info->comparison_code = cmp_code;
9879 loop_info->increment = new_add_val;
9880
9881 /* Inc LABEL_NUSES so that delete_insn will
9882 not delete the label. */
9883 LABEL_NUSES (XEXP (jump_label, 0))++;
9884
9885 /* If we have a separate comparison insn that does more
9886 than just set cc0, the result of the comparison might
9887 be used outside the loop. */
9888 keep_first_compare = (compare_and_branch == 2
9889 #ifdef HAVE_CC0
9890 && sets_cc0_p (first_compare) <= 0
9891 #endif
9892 );
9893
9894 /* Emit an insn after the end of the loop to set the biv's
9895 proper exit value if it is used anywhere outside the loop. */
9896 if (keep_first_compare
9897 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9898 || ! bl->init_insn
9899 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9900 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9901
9902 if (keep_first_compare)
9903 loop_insn_sink (loop, PATTERN (first_compare));
9904
9905 /* Delete compare/branch at end of loop. */
9906 delete_related_insns (PREV_INSN (loop_end));
9907 if (compare_and_branch == 2)
9908 delete_related_insns (first_compare);
9909
9910 /* Add new compare/branch insn at end of loop. */
9911 start_sequence ();
9912 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9913 mode, 0,
9914 XEXP (jump_label, 0));
9915 tem = get_insns ();
9916 end_sequence ();
9917 emit_jump_insn_before (tem, loop_end);
9918
9919 for (tem = PREV_INSN (loop_end);
9920 tem && !JUMP_P (tem);
9921 tem = PREV_INSN (tem))
9922 ;
9923
9924 if (tem)
9925 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9926
9927 if (nonneg)
9928 {
9929 if (tem)
9930 {
9931 /* Increment of LABEL_NUSES done above. */
9932 /* Register is now always nonnegative,
9933 so add REG_NONNEG note to the branch. */
9934 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9935 REG_NOTES (tem));
9936 }
9937 bl->nonneg = 1;
9938 }
9939
9940 /* No insn may reference both the reversed and another biv or it
9941 will fail (see comment near the top of the loop reversal
9942 code).
9943 Earlier on, we have verified that the biv has no use except
9944 counting, or it is the only biv in this function.
9945 However, the code that computes no_use_except_counting does
9946 not verify reg notes. It's possible to have an insn that
9947 references another biv, and has a REG_EQUAL note with an
9948 expression based on the reversed biv. To avoid this case,
9949 remove all REG_EQUAL notes based on the reversed biv
9950 here. */
9951 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9952 if (INSN_P (p))
9953 {
9954 rtx *pnote;
9955 rtx set = single_set (p);
9956 /* If this is a set of a GIV based on the reversed biv, any
9957 REG_EQUAL notes should still be correct. */
9958 if (! set
9959 || !REG_P (SET_DEST (set))
9960 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9961 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9962 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9963 for (pnote = &REG_NOTES (p); *pnote;)
9964 {
9965 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9966 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9967 XEXP (*pnote, 0)))
9968 *pnote = XEXP (*pnote, 1);
9969 else
9970 pnote = &XEXP (*pnote, 1);
9971 }
9972 }
9973
9974 /* Mark that this biv has been reversed. Each giv which depends
9975 on this biv, and which is also live past the end of the loop
9976 will have to be fixed up. */
9977
9978 bl->reversed = 1;
9979
9980 if (loop_dump_stream)
9981 {
9982 fprintf (loop_dump_stream, "Reversed loop");
9983 if (bl->nonneg)
9984 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9985 else
9986 fprintf (loop_dump_stream, "\n");
9987 }
9988
9989 return 1;
9990 }
9991 }
9992 }
9993
9994 return 0;
9995 }
9996 \f
9997 /* Verify whether the biv BL appears to be eliminable,
9998 based on the insns in the loop that refer to it.
9999
10000 If ELIMINATE_P is nonzero, actually do the elimination.
10001
10002 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
10003 determine whether invariant insns should be placed inside or at the
10004 start of the loop. */
10005
10006 static int
10007 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
10008 int eliminate_p, int threshold, int insn_count)
10009 {
10010 struct loop_ivs *ivs = LOOP_IVS (loop);
10011 rtx reg = bl->biv->dest_reg;
10012 rtx p;
10013
10014 /* Scan all insns in the loop, stopping if we find one that uses the
10015 biv in a way that we cannot eliminate. */
10016
10017 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10018 {
10019 enum rtx_code code = GET_CODE (p);
10020 basic_block where_bb = 0;
10021 rtx where_insn = threshold >= insn_count ? 0 : p;
10022 rtx note;
10023
10024 /* If this is a libcall that sets a giv, skip ahead to its end. */
10025 if (INSN_P (p))
10026 {
10027 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
10028
10029 if (note)
10030 {
10031 rtx last = XEXP (note, 0);
10032 rtx set = single_set (last);
10033
10034 if (set && REG_P (SET_DEST (set)))
10035 {
10036 unsigned int regno = REGNO (SET_DEST (set));
10037
10038 if (regno < ivs->n_regs
10039 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
10040 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
10041 p = last;
10042 }
10043 }
10044 }
10045
10046 /* Closely examine the insn if the biv is mentioned. */
10047 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
10048 && reg_mentioned_p (reg, PATTERN (p))
10049 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
10050 eliminate_p, where_bb, where_insn))
10051 {
10052 if (loop_dump_stream)
10053 fprintf (loop_dump_stream,
10054 "Cannot eliminate biv %d: biv used in insn %d.\n",
10055 bl->regno, INSN_UID (p));
10056 break;
10057 }
10058
10059 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
10060 if (eliminate_p
10061 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
10062 && reg_mentioned_p (reg, XEXP (note, 0)))
10063 remove_note (p, note);
10064 }
10065
10066 if (p == loop->end)
10067 {
10068 if (loop_dump_stream)
10069 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
10070 bl->regno, eliminate_p ? "was" : "can be");
10071 return 1;
10072 }
10073
10074 return 0;
10075 }
10076 \f
10077 /* INSN and REFERENCE are instructions in the same insn chain.
10078 Return nonzero if INSN is first. */
10079
10080 static int
10081 loop_insn_first_p (rtx insn, rtx reference)
10082 {
10083 rtx p, q;
10084
10085 for (p = insn, q = reference;;)
10086 {
10087 /* Start with test for not first so that INSN == REFERENCE yields not
10088 first. */
10089 if (q == insn || ! p)
10090 return 0;
10091 if (p == reference || ! q)
10092 return 1;
10093
10094 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
10095 previous insn, hence the <= comparison below does not work if
10096 P is a note. */
10097 if (INSN_UID (p) < max_uid_for_loop
10098 && INSN_UID (q) < max_uid_for_loop
10099 && !NOTE_P (p))
10100 return INSN_LUID (p) <= INSN_LUID (q);
10101
10102 if (INSN_UID (p) >= max_uid_for_loop
10103 || NOTE_P (p))
10104 p = NEXT_INSN (p);
10105 if (INSN_UID (q) >= max_uid_for_loop)
10106 q = NEXT_INSN (q);
10107 }
10108 }
10109
10110 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
10111 the offset that we have to take into account due to auto-increment /
10112 giv derivation is zero. */
10113 static int
10114 biv_elimination_giv_has_0_offset (struct induction *biv,
10115 struct induction *giv, rtx insn)
10116 {
10117 /* If the giv V had the auto-inc address optimization applied
10118 to it, and INSN occurs between the giv insn and the biv
10119 insn, then we'd have to adjust the value used here.
10120 This is rare, so we don't bother to make this possible. */
10121 if (giv->auto_inc_opt
10122 && ((loop_insn_first_p (giv->insn, insn)
10123 && loop_insn_first_p (insn, biv->insn))
10124 || (loop_insn_first_p (biv->insn, insn)
10125 && loop_insn_first_p (insn, giv->insn))))
10126 return 0;
10127
10128 return 1;
10129 }
10130
10131 /* If BL appears in X (part of the pattern of INSN), see if we can
10132 eliminate its use. If so, return 1. If not, return 0.
10133
10134 If BIV does not appear in X, return 1.
10135
10136 If ELIMINATE_P is nonzero, actually do the elimination.
10137 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10138 Depending on how many items have been moved out of the loop, it
10139 will either be before INSN (when WHERE_INSN is nonzero) or at the
10140 start of the loop (when WHERE_INSN is zero). */
10141
10142 static int
10143 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10144 struct iv_class *bl, int eliminate_p,
10145 basic_block where_bb, rtx where_insn)
10146 {
10147 enum rtx_code code = GET_CODE (x);
10148 rtx reg = bl->biv->dest_reg;
10149 enum machine_mode mode = GET_MODE (reg);
10150 struct induction *v;
10151 rtx arg, tem;
10152 #ifdef HAVE_cc0
10153 rtx new;
10154 #endif
10155 int arg_operand;
10156 const char *fmt;
10157 int i, j;
10158
10159 switch (code)
10160 {
10161 case REG:
10162 /* If we haven't already been able to do something with this BIV,
10163 we can't eliminate it. */
10164 if (x == reg)
10165 return 0;
10166 return 1;
10167
10168 case SET:
10169 /* If this sets the BIV, it is not a problem. */
10170 if (SET_DEST (x) == reg)
10171 return 1;
10172
10173 /* If this is an insn that defines a giv, it is also ok because
10174 it will go away when the giv is reduced. */
10175 for (v = bl->giv; v; v = v->next_iv)
10176 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10177 return 1;
10178
10179 #ifdef HAVE_cc0
10180 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10181 {
10182 /* Can replace with any giv that was reduced and
10183 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10184 Require a constant for MULT_VAL, so we know it's nonzero.
10185 ??? We disable this optimization to avoid potential
10186 overflows. */
10187
10188 for (v = bl->giv; v; v = v->next_iv)
10189 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10190 && v->add_val == const0_rtx
10191 && ! v->ignore && ! v->maybe_dead && v->always_computable
10192 && v->mode == mode
10193 && 0)
10194 {
10195 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10196 continue;
10197
10198 if (! eliminate_p)
10199 return 1;
10200
10201 /* If the giv has the opposite direction of change,
10202 then reverse the comparison. */
10203 if (INTVAL (v->mult_val) < 0)
10204 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10205 const0_rtx, v->new_reg);
10206 else
10207 new = v->new_reg;
10208
10209 /* We can probably test that giv's reduced reg. */
10210 if (validate_change (insn, &SET_SRC (x), new, 0))
10211 return 1;
10212 }
10213
10214 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10215 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10216 Require a constant for MULT_VAL, so we know it's nonzero.
10217 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10218 overflow problem. */
10219
10220 for (v = bl->giv; v; v = v->next_iv)
10221 if (GET_CODE (v->mult_val) == CONST_INT
10222 && v->mult_val != const0_rtx
10223 && ! v->ignore && ! v->maybe_dead && v->always_computable
10224 && v->mode == mode
10225 && (GET_CODE (v->add_val) == SYMBOL_REF
10226 || GET_CODE (v->add_val) == LABEL_REF
10227 || GET_CODE (v->add_val) == CONST
10228 || (REG_P (v->add_val)
10229 && REG_POINTER (v->add_val))))
10230 {
10231 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10232 continue;
10233
10234 if (! eliminate_p)
10235 return 1;
10236
10237 /* If the giv has the opposite direction of change,
10238 then reverse the comparison. */
10239 if (INTVAL (v->mult_val) < 0)
10240 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10241 v->new_reg);
10242 else
10243 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10244 copy_rtx (v->add_val));
10245
10246 /* Replace biv with the giv's reduced register. */
10247 update_reg_last_use (v->add_val, insn);
10248 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10249 return 1;
10250
10251 /* Insn doesn't support that constant or invariant. Copy it
10252 into a register (it will be a loop invariant.) */
10253 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10254
10255 loop_insn_emit_before (loop, 0, where_insn,
10256 gen_move_insn (tem,
10257 copy_rtx (v->add_val)));
10258
10259 /* Substitute the new register for its invariant value in
10260 the compare expression. */
10261 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10262 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10263 return 1;
10264 }
10265 }
10266 #endif
10267 break;
10268
10269 case COMPARE:
10270 case EQ: case NE:
10271 case GT: case GE: case GTU: case GEU:
10272 case LT: case LE: case LTU: case LEU:
10273 /* See if either argument is the biv. */
10274 if (XEXP (x, 0) == reg)
10275 arg = XEXP (x, 1), arg_operand = 1;
10276 else if (XEXP (x, 1) == reg)
10277 arg = XEXP (x, 0), arg_operand = 0;
10278 else
10279 break;
10280
10281 if (GET_CODE (arg) != CONST_INT)
10282 return 0;
10283
10284 /* Unless we're dealing with an equality comparison, if we can't
10285 determine that the original biv doesn't wrap, then we must not
10286 apply the transformation. */
10287 /* ??? Actually, what we must do is verify that the transformed
10288 giv doesn't wrap. But the general case of this transformation
10289 was disabled long ago due to wrapping problems, and there's no
10290 point reviving it this close to end-of-life for loop.c. The
10291 only case still enabled is known (via the check on add_val) to
10292 be pointer arithmetic, which in theory never overflows for
10293 valid programs. */
10294 /* Without lifetime analysis, we don't know how COMPARE will be
10295 used, so we must assume the worst. */
10296 if (code != EQ && code != NE
10297 && biased_biv_may_wrap_p (loop, bl, INTVAL (arg)))
10298 return 0;
10299
10300 /* Try to replace with any giv that has constant positive mult_val
10301 and a pointer add_val. */
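/* For example (hypothetical giv): if V is the address giv
   P = BASE + 4*BIV stepping through an `int' array, a comparison
   (lt BIV (const_int 100)) can become (lt P TEM), where TEM is a new
   register loaded with BASE + 4*100 by loop_iv_add_mult_emit_before
   below. Since mult_val is positive and add_val is pointer arithmetic
   (assumed not to overflow), the rewritten test orders the same way and
   the biv no longer appears in the comparison. */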
10302 for (v = bl->giv; v; v = v->next_iv)
10303 if (GET_CODE (v->mult_val) == CONST_INT
10304 && INTVAL (v->mult_val) > 0
10305 && (GET_CODE (v->add_val) == SYMBOL_REF
10306 || GET_CODE (v->add_val) == LABEL_REF
10307 || GET_CODE (v->add_val) == CONST
10308 || (REG_P (v->add_val) && REG_POINTER (v->add_val)))
10309 && ! v->ignore && ! v->maybe_dead && v->always_computable
10310 && v->mode == mode)
10311 {
10312 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10313 continue;
10314
10315 if (! eliminate_p)
10316 return 1;
10317
10318 /* Replace biv with the giv's reduced reg. */
10319 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10320
10321 /* Load the value into a register. */
10322 tem = gen_reg_rtx (mode);
10323 loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val,
10324 tem, where_bb, where_insn);
10325
10326 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10327
10328 if (apply_change_group ())
10329 return 1;
10330 }
10331
10332 /* If we get here, the biv can't be eliminated. */
10333 return 0;
10334
10335 case MEM:
10336 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10337 biv is used in it, since it will be replaced. */
10338 for (v = bl->giv; v; v = v->next_iv)
10339 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10340 return 1;
10341 break;
10342
10343 default:
10344 break;
10345 }
10346
10347 /* See if any subexpression fails elimination. */
10348 fmt = GET_RTX_FORMAT (code);
10349 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10350 {
10351 switch (fmt[i])
10352 {
10353 case 'e':
10354 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10355 eliminate_p, where_bb, where_insn))
10356 return 0;
10357 break;
10358
10359 case 'E':
10360 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10361 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10362 eliminate_p, where_bb, where_insn))
10363 return 0;
10364 break;
10365 }
10366 }
10367
10368 return 1;
10369 }
10370 \f
10371 /* Return nonzero if the last use of REG
10372 is in an insn following INSN in the same basic block. */
10373
10374 static int
10375 last_use_this_basic_block (rtx reg, rtx insn)
10376 {
10377 rtx n;
10378 for (n = insn;
10379 n && !LABEL_P (n) && !JUMP_P (n);
10380 n = NEXT_INSN (n))
10381 {
10382 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10383 return 1;
10384 }
10385 return 0;
10386 }
10387 \f
10388 /* Called via `note_stores' to record the initial value of a biv. Here we
10389 just record the location of the set and process it later. */
10390
10391 static void
10392 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10393 {
10394 struct loop_ivs *ivs = (struct loop_ivs *) data;
10395 struct iv_class *bl;
10396
10397 if (!REG_P (dest)
10398 || REGNO (dest) >= ivs->n_regs
10399 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10400 return;
10401
10402 bl = REG_IV_CLASS (ivs, REGNO (dest));
10403
10404 /* If this is the first set found, record it. */
10405 if (bl->init_insn == 0)
10406 {
10407 bl->init_insn = note_insn;
10408 bl->init_set = set;
10409 }
10410 }
10411 \f
10412 /* If any of the registers in X are "old" and currently have a last use earlier
10413 than INSN, update them to have a last use of INSN. Their actual last use
10414 will be the previous insn but it will not have a valid uid_luid so we can't
10415 use it. X must be a source expression only. */
10416
10417 static void
10418 update_reg_last_use (rtx x, rtx insn)
10419 {
10420 /* Check for the case where INSN does not have a valid luid. In this case,
10421 there is no need to modify the regno_last_uid, as this can only happen
10422 when code is inserted after the loop_end to set a pseudo's final value,
10423 and hence this insn will never be the last use of x.
10424 ???? This comment is not correct. See for example loop_givs_reduce.
10425 This may insert an insn before another new insn. */
10426 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10427 && INSN_UID (insn) < max_uid_for_loop
10428 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10429 {
10430 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10431 }
10432 else
10433 {
10434 int i, j;
10435 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10436 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10437 {
10438 if (fmt[i] == 'e')
10439 update_reg_last_use (XEXP (x, i), insn);
10440 else if (fmt[i] == 'E')
10441 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10442 update_reg_last_use (XVECEXP (x, i, j), insn);
10443 }
10444 }
10445 }
10446 \f
10447 /* Similar to rtlanal.c:get_condition, except that we also put an
10448 invariant last unless both operands are invariants. */
10449
10450 static rtx
10451 get_condition_for_loop (const struct loop *loop, rtx x)
10452 {
10453 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10454
10455 if (comparison == 0
10456 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10457 || loop_invariant_p (loop, XEXP (comparison, 1)))
10458 return comparison;
10459
10460 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10461 XEXP (comparison, 1), XEXP (comparison, 0));
10462 }
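/* Illustrative example (not part of the original sources): given a loop
   whose exit test compares a loop-invariant bound against a biv, say

     (le (reg:SI 120) (reg:SI 100))   ;; reg 120 invariant, reg 100 a biv

   the invariant operand appears first, so the condition is rewritten with
   swap_condition as

     (ge (reg:SI 100) (reg:SI 120))

   so that later code can assume the invariant operand, if any, is always
   the second one.  */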
10463
10464 /* Scan the function and determine whether it has indirect (computed) jumps.
10465
10466 This is taken mostly from flow.c; similar code exists elsewhere
10467 in the compiler. It may be useful to put this into rtlanal.c. */
10468 static int
10469 indirect_jump_in_function_p (rtx start)
10470 {
10471 rtx insn;
10472
10473 for (insn = start; insn; insn = NEXT_INSN (insn))
10474 if (computed_jump_p (insn))
10475 return 1;
10476
10477 return 0;
10478 }
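/* Illustrative note (not part of the original sources): a computed jump is
   one whose target is not a fixed label, e.g. the RTL generated for a
   GNU C computed goto such as "goto *p;":

     (set (pc) (reg:SI 130))

   computed_jump_p recognizes such insns; ordinary tablejumps, which
   reference their case labels explicitly, are not counted.  */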
10479
10480 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10481 documentation for LOOP_MEMS for the definition of `appropriate'.
10482 This function is called from prescan_loop via for_each_rtx. */
10483
10484 static int
10485 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10486 {
10487 struct loop_info *loop_info = data;
10488 int i;
10489 rtx m = *mem;
10490
10491 if (m == NULL_RTX)
10492 return 0;
10493
10494 switch (GET_CODE (m))
10495 {
10496 case MEM:
10497 break;
10498
10499 case CLOBBER:
10500 /* We're not interested in MEMs that are only clobbered. */
10501 return -1;
10502
10503 case CONST_DOUBLE:
10504 /* We're not interested in the MEM associated with a
10505 CONST_DOUBLE, so there's no need to traverse into this. */
10506 return -1;
10507
10508 case EXPR_LIST:
10509 /* We're not interested in any MEMs that only appear in notes. */
10510 return -1;
10511
10512 default:
10513 /* This is not a MEM. */
10514 return 0;
10515 }
10516
10517 /* See if we've already seen this MEM. */
10518 for (i = 0; i < loop_info->mems_idx; ++i)
10519 if (rtx_equal_p (m, loop_info->mems[i].mem))
10520 {
10521 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10522 loop_info->mems[i].mem = m;
10523 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10524 /* The modes of the two memory accesses are different. If
10525 this happens, something tricky is going on, and we just
10526 don't optimize accesses to this MEM. */
10527 loop_info->mems[i].optimize = 0;
10528
10529 return 0;
10530 }
10531
10532 /* Resize the array, if necessary. */
10533 if (loop_info->mems_idx == loop_info->mems_allocated)
10534 {
10535 if (loop_info->mems_allocated != 0)
10536 loop_info->mems_allocated *= 2;
10537 else
10538 loop_info->mems_allocated = 32;
10539
10540 loop_info->mems = xrealloc (loop_info->mems,
10541 loop_info->mems_allocated * sizeof (loop_mem_info));
10542 }
10543
10544 /* Actually insert the MEM. */
10545 loop_info->mems[loop_info->mems_idx].mem = m;
10546 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10547 because we can't put it in a register. We still store it in the
10548 table, though, so that if we see the same address later, but in a
10549 non-BLK mode, we'll not think we can optimize it at that point. */
10550 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10551 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10552 ++loop_info->mems_idx;
10553
10554 return 0;
10555 }
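/* For illustration (not part of the original sources): scanning a loop body
   that contains

     (set (reg:SI 140) (mem:SI (symbol_ref:SI ("counter"))))

   records the MEM in loop_info->mems with optimize = 1 (it is not BLKmode).
   Whether it is really hoisted is decided later in load_mems, which also
   requires the address to be loop-invariant, the access not to be volatile,
   and no aliasing store within the loop.  */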
10556
10557
10558 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10559
10560 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10561 register that is modified by an insn between FROM and TO. If the
10562 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10563 more, stop incrementing it, to avoid overflow.
10564
10565 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10566 register I is used, if it is only used once. Otherwise, it is set
10567 to 0 (for no uses) or const0_rtx for more than one use. This
10568 parameter may be zero, in which case this processing is not done.
10569
10570 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10571 optimize register I. */
10572
10573 static void
10574 loop_regs_scan (const struct loop *loop, int extra_size)
10575 {
10576 struct loop_regs *regs = LOOP_REGS (loop);
10577 int old_nregs;
10578 /* last_set[n] is nonzero iff reg n has been set in the current
10579 basic block. In that case, it is the insn that last set reg n. */
10580 rtx *last_set;
10581 rtx insn;
10582 int i;
10583
10584 old_nregs = regs->num;
10585 regs->num = max_reg_num ();
10586
10587 /* Grow the regs array if not allocated or too small. */
10588 if (regs->num >= regs->size)
10589 {
10590 regs->size = regs->num + extra_size;
10591
10592 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10593
10594 /* Zero the new elements. */
10595 memset (regs->array + old_nregs, 0,
10596 (regs->size - old_nregs) * sizeof (*regs->array));
10597 }
10598
10599 /* Clear previously scanned fields but do not clear n_times_set. */
10600 for (i = 0; i < old_nregs; i++)
10601 {
10602 regs->array[i].set_in_loop = 0;
10603 regs->array[i].may_not_optimize = 0;
10604 regs->array[i].single_usage = NULL_RTX;
10605 }
10606
10607 last_set = xcalloc (regs->num, sizeof (rtx));
10608
10609 /* Scan the loop, recording register usage. */
10610 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10611 insn = NEXT_INSN (insn))
10612 {
10613 if (INSN_P (insn))
10614 {
10615 /* Record registers that have exactly one use. */
10616 find_single_use_in_loop (regs, insn, PATTERN (insn));
10617
10618 /* Include uses in REG_EQUAL notes. */
10619 if (REG_NOTES (insn))
10620 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10621
10622 if (GET_CODE (PATTERN (insn)) == SET
10623 || GET_CODE (PATTERN (insn)) == CLOBBER)
10624 count_one_set (regs, insn, PATTERN (insn), last_set);
10625 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10626 {
10627 int i;
10628 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10629 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10630 last_set);
10631 }
10632 }
10633
10634 if (LABEL_P (insn) || JUMP_P (insn))
10635 memset (last_set, 0, regs->num * sizeof (rtx));
10636
10637 /* Invalidate all registers used for function argument passing.
10638 We check rtx_varies_p for the same reason as below, to allow
10639 optimizing PIC calculations. */
10640 if (CALL_P (insn))
10641 {
10642 rtx link;
10643 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10644 link;
10645 link = XEXP (link, 1))
10646 {
10647 rtx op, reg;
10648
10649 if (GET_CODE (op = XEXP (link, 0)) == USE
10650 && REG_P (reg = XEXP (op, 0))
10651 && rtx_varies_p (reg, 1))
10652 regs->array[REGNO (reg)].may_not_optimize = 1;
10653 }
10654 }
10655 }
10656
10657 /* Invalidate all hard registers clobbered by calls. With one exception:
10658 a call-clobbered PIC register is still function-invariant for our
10659 purposes, since we can hoist any PIC calculations out of the loop.
10660 Thus the call to rtx_varies_p. */
10661 if (LOOP_INFO (loop)->has_call)
10662 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10663 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10664 && rtx_varies_p (regno_reg_rtx[i], 1))
10665 {
10666 regs->array[i].may_not_optimize = 1;
10667 regs->array[i].set_in_loop = 1;
10668 }
10669
10670 #ifdef AVOID_CCMODE_COPIES
10671 /* Don't try to move insns which set CC registers if we should not
10672 create CCmode register copies. */
10673 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10674 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10675 regs->array[i].may_not_optimize = 1;
10676 #endif
10677
10678 /* Set regs->array[I].n_times_set for the new registers. */
10679 for (i = old_nregs; i < regs->num; i++)
10680 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10681
10682 free (last_set);
10683 }
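/* Illustrative summary (not part of the original sources) of the
   per-register data this scan produces for a register R:

     regs->array[R].set_in_loop      number of sets seen, saturating at 127
     regs->array[R].single_usage     NULL_RTX   if R is never used,
                                     the insn   if R is used exactly once,
                                     const0_rtx if R is used more than once
     regs->array[R].may_not_optimize nonzero if R must be left alone
                                     (e.g. varying argument-passing registers
                                     at calls, call-clobbered hard registers,
                                     or CC-mode registers on some targets).  */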
10684
10685 /* Returns the number of real INSNs in the LOOP. */
10686
10687 static int
10688 count_insns_in_loop (const struct loop *loop)
10689 {
10690 int count = 0;
10691 rtx insn;
10692
10693 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10694 insn = NEXT_INSN (insn))
10695 if (INSN_P (insn))
10696 ++count;
10697
10698 return count;
10699 }
10700
10701 /* Move MEMs into registers for the duration of the loop. */
10702
10703 static void
10704 load_mems (const struct loop *loop)
10705 {
10706 struct loop_info *loop_info = LOOP_INFO (loop);
10707 struct loop_regs *regs = LOOP_REGS (loop);
10708 int maybe_never = 0;
10709 int i;
10710 rtx p, prev_ebb_head;
10711 rtx label = NULL_RTX;
10712 rtx end_label;
10713 /* Nonzero if the next instruction may never be executed. */
10714 int next_maybe_never = 0;
10715 unsigned int last_max_reg = max_reg_num ();
10716
10717 if (loop_info->mems_idx == 0)
10718 return;
10719
10720 /* We cannot use next_label here because it skips over normal insns. */
10721 end_label = next_nonnote_insn (loop->end);
10722 if (end_label && !LABEL_P (end_label))
10723 end_label = NULL_RTX;
10724
10725 /* Check to see if it's possible that some instructions in the loop are
10726 never executed. Also check if there is a goto out of the loop other
10727 than right after the end of the loop. */
10728 for (p = next_insn_in_loop (loop, loop->scan_start);
10729 p != NULL_RTX;
10730 p = next_insn_in_loop (loop, p))
10731 {
10732 if (LABEL_P (p))
10733 maybe_never = 1;
10734 else if (JUMP_P (p)
10735 /* If we enter the loop in the middle, and scan
10736 around to the beginning, don't set maybe_never
10737 for that. This must be an unconditional jump,
10738 otherwise the code at the top of the loop might
10739 never be executed. Unconditional jumps are
10740 followed by a barrier and then the loop end. */
10741 && ! (JUMP_P (p)
10742 && JUMP_LABEL (p) == loop->top
10743 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10744 && any_uncondjump_p (p)))
10745 {
10746 /* If this is a jump outside of the loop but not right
10747 after the end of the loop, we would have to emit new fixup
10748 sequences for each such label. */
10749 if (/* If we can't tell where control might go when this
10750 JUMP_INSN is executed, we must be conservative. */
10751 !JUMP_LABEL (p)
10752 || (JUMP_LABEL (p) != end_label
10753 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10754 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10755 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10756 return;
10757
10758 if (!any_condjump_p (p))
10759 /* Something complicated. */
10760 maybe_never = 1;
10761 else
10762 /* If there are any more instructions in the loop, they
10763 might not be reached. */
10764 next_maybe_never = 1;
10765 }
10766 else if (next_maybe_never)
10767 maybe_never = 1;
10768 }
10769
10770 /* Find start of the extended basic block that enters the loop. */
10771 for (p = loop->start;
10772 PREV_INSN (p) && !LABEL_P (p);
10773 p = PREV_INSN (p))
10774 ;
10775 prev_ebb_head = p;
10776
10777 cselib_init (true);
10778
10779 /* Build table of mems that get set to constant values before the
10780 loop. */
10781 for (; p != loop->start; p = NEXT_INSN (p))
10782 cselib_process_insn (p);
10783
10784 /* Actually move the MEMs. */
10785 for (i = 0; i < loop_info->mems_idx; ++i)
10786 {
10787 regset_head load_copies;
10788 regset_head store_copies;
10789 int written = 0;
10790 rtx reg;
10791 rtx mem = loop_info->mems[i].mem;
10792 rtx mem_list_entry;
10793
10794 if (MEM_VOLATILE_P (mem)
10795 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10796 /* There's no telling whether or not MEM is modified. */
10797 loop_info->mems[i].optimize = 0;
10798
10799 /* Go through the MEMs written to in the loop to see if this
10800 one is aliased by one of them. */
10801 mem_list_entry = loop_info->store_mems;
10802 while (mem_list_entry)
10803 {
10804 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10805 written = 1;
10806 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10807 mem, rtx_varies_p))
10808 {
10809 /* MEM is indeed aliased by this store. */
10810 loop_info->mems[i].optimize = 0;
10811 break;
10812 }
10813 mem_list_entry = XEXP (mem_list_entry, 1);
10814 }
10815
10816 if (flag_float_store && written
10817 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10818 loop_info->mems[i].optimize = 0;
10819
10820 /* If this MEM is written to, we must be sure that there
10821 are no reads from another MEM that aliases this one. */
10822 if (loop_info->mems[i].optimize && written)
10823 {
10824 int j;
10825
10826 for (j = 0; j < loop_info->mems_idx; ++j)
10827 {
10828 if (j == i)
10829 continue;
10830 else if (true_dependence (mem,
10831 VOIDmode,
10832 loop_info->mems[j].mem,
10833 rtx_varies_p))
10834 {
10835 /* It's not safe to hoist loop_info->mems[i] out of
10836 the loop because writes to it might not be
10837 seen by reads from loop_info->mems[j]. */
10838 loop_info->mems[i].optimize = 0;
10839 break;
10840 }
10841 }
10842 }
10843
10844 if (maybe_never && may_trap_p (mem))
10845 /* We can't access the MEM outside the loop; it might
10846 cause a trap that wouldn't have happened otherwise. */
10847 loop_info->mems[i].optimize = 0;
10848
10849 if (!loop_info->mems[i].optimize)
10850 /* We thought we were going to lift this MEM out of the
10851 loop, but later discovered that we could not. */
10852 continue;
10853
10854 INIT_REG_SET (&load_copies);
10855 INIT_REG_SET (&store_copies);
10856
10857 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10858 order to keep scan_loop from moving stores to this MEM
10859 out of the loop just because this REG is neither a
10860 user-variable nor used in the loop test. */
10861 reg = gen_reg_rtx (GET_MODE (mem));
10862 REG_USERVAR_P (reg) = 1;
10863 loop_info->mems[i].reg = reg;
10864
10865 /* Now, replace all references to the MEM with the
10866 corresponding pseudos. */
10867 maybe_never = 0;
10868 for (p = next_insn_in_loop (loop, loop->scan_start);
10869 p != NULL_RTX;
10870 p = next_insn_in_loop (loop, p))
10871 {
10872 if (INSN_P (p))
10873 {
10874 rtx set;
10875
10876 set = single_set (p);
10877
10878 /* See if this copies the mem into a register that isn't
10879 modified afterwards. We'll try to do copy propagation
10880 a little further on. */
10881 if (set
10882 /* @@@ This test is _way_ too conservative. */
10883 && ! maybe_never
10884 && REG_P (SET_DEST (set))
10885 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10886 && REGNO (SET_DEST (set)) < last_max_reg
10887 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10888 && rtx_equal_p (SET_SRC (set), mem))
10889 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10890
10891 /* See if this copies the mem from a register that isn't
10892 modified afterwards. We'll try to remove the
10893 redundant copy later on by doing a little register
10894 renaming and copy propagation. This will help
10895 to untangle things for the BIV detection code. */
10896 if (set
10897 && ! maybe_never
10898 && REG_P (SET_SRC (set))
10899 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10900 && REGNO (SET_SRC (set)) < last_max_reg
10901 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10902 && rtx_equal_p (SET_DEST (set), mem))
10903 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10904
10905 /* If this is a call which uses / clobbers this memory
10906 location, we must not change the interface here. */
10907 if (CALL_P (p)
10908 && reg_mentioned_p (loop_info->mems[i].mem,
10909 CALL_INSN_FUNCTION_USAGE (p)))
10910 {
10911 cancel_changes (0);
10912 loop_info->mems[i].optimize = 0;
10913 break;
10914 }
10915 else
10916 /* Replace the memory reference with the shadow register. */
10917 replace_loop_mems (p, loop_info->mems[i].mem,
10918 loop_info->mems[i].reg, written);
10919 }
10920
10921 if (LABEL_P (p)
10922 || JUMP_P (p))
10923 maybe_never = 1;
10924 }
10925
10926 if (! loop_info->mems[i].optimize)
10927 ; /* We found we couldn't do the replacement, so do nothing. */
10928 else if (! apply_change_group ())
10929 /* We couldn't replace all occurrences of the MEM. */
10930 loop_info->mems[i].optimize = 0;
10931 else
10932 {
10933 /* Load the memory immediately before LOOP->START, which is
10934 the NOTE_LOOP_BEG. */
10935 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10936 rtx set;
10937 rtx best = mem;
10938 unsigned j;
10939 struct elt_loc_list *const_equiv = 0;
10940 reg_set_iterator rsi;
10941
10942 if (e)
10943 {
10944 struct elt_loc_list *equiv;
10945 struct elt_loc_list *best_equiv = 0;
10946 for (equiv = e->locs; equiv; equiv = equiv->next)
10947 {
10948 if (CONSTANT_P (equiv->loc))
10949 const_equiv = equiv;
10950 else if (REG_P (equiv->loc)
10951 /* Extending hard register lifetimes causes a crash
10952 on SRC targets. Doing so on non-SRC targets is
10953 probably also not a good idea, since we most
10954 likely have a pseudo-register equivalence as
10955 well. */
10956 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10957 best_equiv = equiv;
10958 }
10959 /* Use the constant equivalence if that is cheap enough. */
10960 if (! best_equiv)
10961 best_equiv = const_equiv;
10962 else if (const_equiv
10963 && (rtx_cost (const_equiv->loc, SET)
10964 <= rtx_cost (best_equiv->loc, SET)))
10965 {
10966 best_equiv = const_equiv;
10967 const_equiv = 0;
10968 }
10969
10970 /* If best_equiv is nonzero, we know that MEM is set to a
10971 constant or register before the loop. We will use this
10972 knowledge to initialize the shadow register with that
10973 constant or reg rather than by loading from MEM. */
10974 if (best_equiv)
10975 best = copy_rtx (best_equiv->loc);
10976 }
10977
10978 set = gen_move_insn (reg, best);
10979 set = loop_insn_hoist (loop, set);
10980 if (REG_P (best))
10981 {
10982 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10983 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10984 {
10985 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10986 break;
10987 }
10988 }
10989
10990 if (const_equiv)
10991 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10992
10993 if (written)
10994 {
10995 if (label == NULL_RTX)
10996 {
10997 label = gen_label_rtx ();
10998 emit_label_after (label, loop->end);
10999 }
11000
11001 /* Store the memory immediately after END, which is
11002 the NOTE_LOOP_END. */
11003 set = gen_move_insn (copy_rtx (mem), reg);
11004 loop_insn_emit_after (loop, 0, label, set);
11005 }
11006
11007 if (loop_dump_stream)
11008 {
11009 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
11010 REGNO (reg), (written ? "r/w" : "r/o"));
11011 print_rtl (loop_dump_stream, mem);
11012 fputc ('\n', loop_dump_stream);
11013 }
11014
11015 /* Attempt a bit of copy propagation. This helps untangle the
11016 data flow, and enables {basic,general}_induction_var to find
11017 more bivs/givs. */
11018 EXECUTE_IF_SET_IN_REG_SET
11019 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11020 {
11021 try_copy_prop (loop, reg, j);
11022 }
11023 CLEAR_REG_SET (&load_copies);
11024
11025 EXECUTE_IF_SET_IN_REG_SET
11026 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11027 {
11028 try_swap_copy_prop (loop, reg, j);
11029 }
11030 CLEAR_REG_SET (&store_copies);
11031 }
11032 }
11033
11034 /* Now, we need to replace all references to the previous exit
11035 label with the new one. */
11036 if (label != NULL_RTX && end_label != NULL_RTX)
11037 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11038 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11039 redirect_jump (p, label, false);
11040
11041 cselib_finish ();
11042 }
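/* Illustrative before/after sketch (not part of the original sources) of the
   transformation load_mems performs for a MEM M with a loop-invariant
   address, using a fresh pseudo R:

     in the pre-header:   (set R M)    ;; or a cheaper constant/register
                                       ;; equivalent found via cselib
     inside the loop:     every reference to M is replaced by R
     after the loop:      (set M R)    ;; only if M was written; emitted at a
                                       ;; new label that the loop's exit
                                       ;; jumps are redirected to

   try_copy_prop and try_swap_copy_prop then clean up redundant register
   copies that used to reference M.  */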
11043
11044 /* For communication between note_reg_stored and its caller. */
11045 struct note_reg_stored_arg
11046 {
11047 int set_seen;
11048 rtx reg;
11049 };
11050
11051 /* Called via note_stores, record in SET_SEEN whether X, which is written,
11052 is equal to ARG. */
11053 static void
11054 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11055 {
11056 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11057 if (t->reg == x)
11058 t->set_seen = 1;
11059 }
11060
11061 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11062 There must be exactly one insn that sets this pseudo; it will be
11063 deleted if all replacements succeed and we can prove that the register
11064 is not used after the loop. */
11065
11066 static void
11067 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11068 {
11069 /* This is the reg that we are copying from. */
11070 rtx reg_rtx = regno_reg_rtx[regno];
11071 rtx init_insn = 0;
11072 rtx insn;
11073 /* These help keep track of whether we replaced all uses of the reg. */
11074 int replaced_last = 0;
11075 int store_is_first = 0;
11076
11077 for (insn = next_insn_in_loop (loop, loop->scan_start);
11078 insn != NULL_RTX;
11079 insn = next_insn_in_loop (loop, insn))
11080 {
11081 rtx set;
11082
11083 /* Only substitute within one extended basic block from the initializing
11084 insn. */
11085 if (LABEL_P (insn) && init_insn)
11086 break;
11087
11088 if (! INSN_P (insn))
11089 continue;
11090
11091 /* Is this the initializing insn? */
11092 set = single_set (insn);
11093 if (set
11094 && REG_P (SET_DEST (set))
11095 && REGNO (SET_DEST (set)) == regno)
11096 {
11097 gcc_assert (!init_insn);
11098
11099 init_insn = insn;
11100 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11101 store_is_first = 1;
11102 }
11103
11104 /* Only substitute after seeing the initializing insn. */
11105 if (init_insn && insn != init_insn)
11106 {
11107 struct note_reg_stored_arg arg;
11108
11109 replace_loop_regs (insn, reg_rtx, replacement);
11110 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11111 replaced_last = 1;
11112
11113 /* Stop replacing when REPLACEMENT is modified. */
11114 arg.reg = replacement;
11115 arg.set_seen = 0;
11116 note_stores (PATTERN (insn), note_reg_stored, &arg);
11117 if (arg.set_seen)
11118 {
11119 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11120
11121 /* It is possible that we've turned a previously valid REG_EQUAL note
11122 into an invalid one: we changed REGNO to REPLACEMENT and, unlike REGNO,
11123 REPLACEMENT is modified, so the note no longer means the same thing. */
11124 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11125 remove_note (insn, note);
11126 break;
11127 }
11128 }
11129 }
11130 gcc_assert (init_insn);
11131 if (apply_change_group ())
11132 {
11133 if (loop_dump_stream)
11134 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11135 if (store_is_first && replaced_last)
11136 {
11137 rtx first;
11138 rtx retval_note;
11139
11140 /* Assume we're just deleting INIT_INSN. */
11141 first = init_insn;
11142 /* Look for REG_RETVAL note. If we're deleting the end of
11143 the libcall sequence, the whole sequence can go. */
11144 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11145 /* If we found a REG_RETVAL note, find the first instruction
11146 in the sequence. */
11147 if (retval_note)
11148 first = XEXP (retval_note, 0);
11149
11150 /* Delete the instructions. */
11151 loop_delete_insns (first, init_insn);
11152 }
11153 if (loop_dump_stream)
11154 fprintf (loop_dump_stream, ".\n");
11155 }
11156 }
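/* For illustration (not part of the original sources): after load_mems has
   replaced a hoisted MEM with its shadow register (say reg 300), the loop
   may still contain a now-redundant copy such as

     insn A:  (set (reg:SI 150) (reg:SI 300))
     insn B:  ... uses of (reg:SI 150) ...

   try_copy_prop (loop, <reg 300>, 150) rewrites the uses in B to reg 300
   and, when A is the register's first reference and its last use was
   successfully replaced, deletes A (or the whole libcall sequence that A
   ends).  */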
11157
11158 /* Replace all the instructions from FIRST up to and including LAST
11159 with NOTE_INSN_DELETED notes. */
11160
11161 static void
11162 loop_delete_insns (rtx first, rtx last)
11163 {
11164 while (1)
11165 {
11166 if (loop_dump_stream)
11167 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
11168 INSN_UID (first));
11169 delete_insn (first);
11170
11171 /* If this was the LAST instruction we're supposed to delete,
11172 we're done. */
11173 if (first == last)
11174 break;
11175
11176 first = NEXT_INSN (first);
11177 }
11178 }
11179
11180 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11181 loop LOOP if the order of the sets of these registers can be
11182 swapped. There must be exactly one insn within the loop that sets
11183 this pseudo followed immediately by a move insn that sets
11184 REPLACEMENT with REGNO. */
11185 static void
11186 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11187 unsigned int regno)
11188 {
11189 rtx insn;
11190 rtx set = NULL_RTX;
11191 unsigned int new_regno;
11192
11193 new_regno = REGNO (replacement);
11194
11195 for (insn = next_insn_in_loop (loop, loop->scan_start);
11196 insn != NULL_RTX;
11197 insn = next_insn_in_loop (loop, insn))
11198 {
11199 /* Search for the insn that copies REGNO to NEW_REGNO. */
11200 if (INSN_P (insn)
11201 && (set = single_set (insn))
11202 && REG_P (SET_DEST (set))
11203 && REGNO (SET_DEST (set)) == new_regno
11204 && REG_P (SET_SRC (set))
11205 && REGNO (SET_SRC (set)) == regno)
11206 break;
11207 }
11208
11209 if (insn != NULL_RTX)
11210 {
11211 rtx prev_insn;
11212 rtx prev_set;
11213
11214 /* Some DEF-USE info would come in handy here to make this
11215 function more general. For now, just check the previous insn
11216 which is the most likely candidate for setting REGNO. */
11217
11218 prev_insn = PREV_INSN (insn);
11219
11220 if (INSN_P (prev_insn)
11221 && (prev_set = single_set (prev_insn))
11222 && REG_P (SET_DEST (prev_set))
11223 && REGNO (SET_DEST (prev_set)) == regno)
11224 {
11225 /* We have:
11226 (set (reg regno) (expr))
11227 (set (reg new_regno) (reg regno))
11228
11229 so try converting this to:
11230 (set (reg new_regno) (expr))
11231 (set (reg regno) (reg new_regno))
11232
11233 The former construct is often generated when a global
11234 variable used for an induction variable is shadowed by a
11235 register (NEW_REGNO). The latter construct improves the
11236 chances of GIV replacement and BIV elimination. */
11237
11238 validate_change (prev_insn, &SET_DEST (prev_set),
11239 replacement, 1);
11240 validate_change (insn, &SET_DEST (set),
11241 SET_SRC (set), 1);
11242 validate_change (insn, &SET_SRC (set),
11243 replacement, 1);
11244
11245 if (apply_change_group ())
11246 {
11247 if (loop_dump_stream)
11248 fprintf (loop_dump_stream,
11249 " Swapped set of reg %d at %d with reg %d at %d.\n",
11250 regno, INSN_UID (insn),
11251 new_regno, INSN_UID (prev_insn));
11252
11253 /* Update first use of REGNO. */
11254 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11255 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11256
11257 /* Now perform copy propagation to hopefully
11258 remove all uses of REGNO within the loop. */
11259 try_copy_prop (loop, replacement, regno);
11260 }
11261 }
11262 }
11263 }
11264
11265 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11266
11267 static int
11268 find_mem_in_note_1 (rtx *x, void *data)
11269 {
11270 if (*x != NULL_RTX && MEM_P (*x))
11271 {
11272 rtx *res = (rtx *) data;
11273 *res = *x;
11274 return 1;
11275 }
11276 return 0;
11277 }
11278
11279 /* Returns the first MEM found in NOTE by depth-first search. */
11280
11281 static rtx
11282 find_mem_in_note (rtx note)
11283 {
11284 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11285 return note;
11286 return NULL_RTX;
11287 }
11288
11289 /* Replace MEM with its associated pseudo register. This function is
11290 called from load_mems via for_each_rtx. DATA is actually a pointer
11291 to a structure describing the instruction currently being scanned
11292 and the MEM we are currently replacing. */
11293
11294 static int
11295 replace_loop_mem (rtx *mem, void *data)
11296 {
11297 loop_replace_args *args = (loop_replace_args *) data;
11298 rtx m = *mem;
11299
11300 if (m == NULL_RTX)
11301 return 0;
11302
11303 switch (GET_CODE (m))
11304 {
11305 case MEM:
11306 break;
11307
11308 case CONST_DOUBLE:
11309 /* We're not interested in the MEM associated with a
11310 CONST_DOUBLE, so there's no need to traverse into one. */
11311 return -1;
11312
11313 default:
11314 /* This is not a MEM. */
11315 return 0;
11316 }
11317
11318 if (!rtx_equal_p (args->match, m))
11319 /* This is not the MEM we are currently replacing. */
11320 return 0;
11321
11322 /* Actually replace the MEM. */
11323 validate_change (args->insn, mem, args->replacement, 1);
11324
11325 return 0;
11326 }
11327
11328 static void
11329 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11330 {
11331 loop_replace_args args;
11332
11333 args.insn = insn;
11334 args.match = mem;
11335 args.replacement = reg;
11336
11337 for_each_rtx (&insn, replace_loop_mem, &args);
11338
11339 /* If we hoist a mem write out of the loop, then REG_EQUAL
11340 notes referring to the mem are no longer valid. */
11341 if (written)
11342 {
11343 rtx note, sub;
11344 rtx *link;
11345
11346 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11347 {
11348 if (REG_NOTE_KIND (note) == REG_EQUAL
11349 && (sub = find_mem_in_note (note))
11350 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11351 {
11352 /* Remove the note. */
11353 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11354 break;
11355 }
11356 }
11357 }
11358 }
11359
11360 /* Replace one register with another. Called through for_each_rtx; PX points
11361 to the rtx being scanned. DATA is actually a pointer to
11362 a structure of arguments. */
11363
11364 static int
11365 replace_loop_reg (rtx *px, void *data)
11366 {
11367 rtx x = *px;
11368 loop_replace_args *args = (loop_replace_args *) data;
11369
11370 if (x == NULL_RTX)
11371 return 0;
11372
11373 if (x == args->match)
11374 validate_change (args->insn, px, args->replacement, 1);
11375
11376 return 0;
11377 }
11378
11379 static void
11380 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11381 {
11382 loop_replace_args args;
11383
11384 args.insn = insn;
11385 args.match = reg;
11386 args.replacement = replacement;
11387
11388 for_each_rtx (&insn, replace_loop_reg, &args);
11389 }
11390 \f
11391 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11392 (ignored in the interim). */
11393
11394 static rtx
11395 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11396 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11397 rtx pattern)
11398 {
11399 return emit_insn_after (pattern, where_insn);
11400 }
11401
11402
11403 /* If WHERE_INSN is nonzero, emit an insn for PATTERN before WHERE_INSN
11404 in basic block WHERE_BB (ignored in the interim) within the loop;
11405 otherwise hoist PATTERN into the loop pre-header. */
11406
11407 static rtx
11408 loop_insn_emit_before (const struct loop *loop,
11409 basic_block where_bb ATTRIBUTE_UNUSED,
11410 rtx where_insn, rtx pattern)
11411 {
11412 if (! where_insn)
11413 return loop_insn_hoist (loop, pattern);
11414 return emit_insn_before (pattern, where_insn);
11415 }
11416
11417
11418 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11419 WHERE_BB (ignored in the interim) within the loop. */
11420
11421 static rtx
11422 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11423 basic_block where_bb ATTRIBUTE_UNUSED,
11424 rtx where_insn, rtx pattern)
11425 {
11426 return emit_call_insn_before (pattern, where_insn);
11427 }
11428
11429
11430 /* Hoist insn for PATTERN into the loop pre-header. */
11431
11432 static rtx
11433 loop_insn_hoist (const struct loop *loop, rtx pattern)
11434 {
11435 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11436 }
11437
11438
11439 /* Hoist call insn for PATTERN into the loop pre-header. */
11440
11441 static rtx
11442 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11443 {
11444 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11445 }
11446
11447
11448 /* Sink insn for PATTERN after the loop end. */
11449
11450 static rtx
11451 loop_insn_sink (const struct loop *loop, rtx pattern)
11452 {
11453 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11454 }
11455
11456 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
11457 and a constant. Emit a sequence of instructions to load it into REG. */
11458 static rtx
11459 gen_load_of_final_value (rtx reg, rtx final_value)
11460 {
11461 rtx seq;
11462 start_sequence ();
11463 final_value = force_operand (final_value, reg);
11464 if (final_value != reg)
11465 emit_move_insn (reg, final_value);
11466 seq = get_insns ();
11467 end_sequence ();
11468 return seq;
11469 }
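/* Illustrative example (not part of the original sources): for a biv whose
   final value is (plus:SI (reg:SI 100) (const_int 8)), force_operand emits
   something like

     (set (reg:SI 200) (plus:SI (reg:SI 100) (const_int 8)))

   directly into REG (here reg 200), so no extra move is needed; for a plain
   register or constant final value a single move into REG is emitted
   instead.  The whole sequence is returned for the caller to place.  */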
11470
11471 /* If the loop has multiple exits, emit insn for PATTERN before the
11472 loop to ensure that it will always be executed no matter how the
11473 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11474 since this is slightly more efficient. */
11475
11476 static rtx
11477 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11478 {
11479 if (loop->exit_count)
11480 return loop_insn_hoist (loop, pattern);
11481 else
11482 return loop_insn_sink (loop, pattern);
11483 }
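/* For illustration (not part of the original sources): if the loop can be
   left early (loop->exit_count nonzero, e.g. it contains a "break"), the
   insn is hoisted into the pre-header so it is executed no matter which
   exit is taken; if the only exit is the normal fall-through one, the insn
   is emitted at loop->sink, just after the loop.  */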
11484 \f
11485 static void
11486 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11487 {
11488 struct iv_class *bl;
11489 int iv_num = 0;
11490
11491 if (! loop || ! file)
11492 return;
11493
11494 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11495 iv_num++;
11496
11497 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11498
11499 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11500 {
11501 loop_iv_class_dump (bl, file, verbose);
11502 fputc ('\n', file);
11503 }
11504 }
11505
11506
11507 static void
11508 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11509 int verbose ATTRIBUTE_UNUSED)
11510 {
11511 struct induction *v;
11512 rtx incr;
11513 int i;
11514
11515 if (! bl || ! file)
11516 return;
11517
11518 fprintf (file, "IV class for reg %d, benefit %d\n",
11519 bl->regno, bl->total_benefit);
11520
11521 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11522 if (bl->initial_value)
11523 {
11524 fprintf (file, ", init val: ");
11525 print_simple_rtl (file, bl->initial_value);
11526 }
11527 if (bl->initial_test)
11528 {
11529 fprintf (file, ", init test: ");
11530 print_simple_rtl (file, bl->initial_test);
11531 }
11532 fputc ('\n', file);
11533
11534 if (bl->final_value)
11535 {
11536 fprintf (file, " Final val: ");
11537 print_simple_rtl (file, bl->final_value);
11538 fputc ('\n', file);
11539 }
11540
11541 if ((incr = biv_total_increment (bl)))
11542 {
11543 fprintf (file, " Total increment: ");
11544 print_simple_rtl (file, incr);
11545 fputc ('\n', file);
11546 }
11547
11548 /* List the increments. */
11549 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11550 {
11551 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11552 print_simple_rtl (file, v->add_val);
11553 fputc ('\n', file);
11554 }
11555
11556 /* List the givs. */
11557 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11558 {
11559 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11560 i, INSN_UID (v->insn), v->benefit);
11561 if (v->giv_type == DEST_ADDR)
11562 print_simple_rtl (file, v->mem);
11563 else
11564 print_simple_rtl (file, single_set (v->insn));
11565 fputc ('\n', file);
11566 }
11567 }
11568
11569
11570 static void
11571 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11572 {
11573 if (! v || ! file)
11574 return;
11575
11576 fprintf (file,
11577 "Biv %d: insn %d",
11578 REGNO (v->dest_reg), INSN_UID (v->insn));
11579 fprintf (file, " const ");
11580 print_simple_rtl (file, v->add_val);
11581
11582 if (verbose && v->final_value)
11583 {
11584 fputc ('\n', file);
11585 fprintf (file, " final ");
11586 print_simple_rtl (file, v->final_value);
11587 }
11588
11589 fputc ('\n', file);
11590 }
11591
11592
11593 static void
11594 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11595 {
11596 if (! v || ! file)
11597 return;
11598
11599 if (v->giv_type == DEST_REG)
11600 fprintf (file, "Giv %d: insn %d",
11601 REGNO (v->dest_reg), INSN_UID (v->insn));
11602 else
11603 fprintf (file, "Dest address: insn %d",
11604 INSN_UID (v->insn));
11605
11606 fprintf (file, " src reg %d benefit %d",
11607 REGNO (v->src_reg), v->benefit);
11608 fprintf (file, " lifetime %d",
11609 v->lifetime);
11610
11611 if (v->replaceable)
11612 fprintf (file, " replaceable");
11613
11614 if (v->no_const_addval)
11615 fprintf (file, " ncav");
11616
11617 if (v->ext_dependent)
11618 {
11619 switch (GET_CODE (v->ext_dependent))
11620 {
11621 case SIGN_EXTEND:
11622 fprintf (file, " ext se");
11623 break;
11624 case ZERO_EXTEND:
11625 fprintf (file, " ext ze");
11626 break;
11627 case TRUNCATE:
11628 fprintf (file, " ext tr");
11629 break;
11630 default:
11631 gcc_unreachable ();
11632 }
11633 }
11634
11635 fputc ('\n', file);
11636 fprintf (file, " mult ");
11637 print_simple_rtl (file, v->mult_val);
11638
11639 fputc ('\n', file);
11640 fprintf (file, " add ");
11641 print_simple_rtl (file, v->add_val);
11642
11643 if (verbose && v->final_value)
11644 {
11645 fputc ('\n', file);
11646 fprintf (file, " final ");
11647 print_simple_rtl (file, v->final_value);
11648 }
11649
11650 fputc ('\n', file);
11651 }
11652
11653
11654 void
11655 debug_ivs (const struct loop *loop)
11656 {
11657 loop_ivs_dump (loop, stderr, 1);
11658 }
11659
11660
11661 void
11662 debug_iv_class (const struct iv_class *bl)
11663 {
11664 loop_iv_class_dump (bl, stderr, 1);
11665 }
11666
11667
11668 void
11669 debug_biv (const struct induction *v)
11670 {
11671 loop_biv_dump (v, stderr, 1);
11672 }
11673
11674
11675 void
11676 debug_giv (const struct induction *v)
11677 {
11678 loop_giv_dump (v, stderr, 1);
11679 }
11680
11681
11682 #define LOOP_BLOCK_NUM_1(INSN) \
11683 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
11684
11685 /* The notes do not have an assigned block, so look at the next insn. */
11686 #define LOOP_BLOCK_NUM(INSN) \
11687 ((INSN) ? (NOTE_P (INSN) \
11688 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11689 : LOOP_BLOCK_NUM_1 (INSN)) \
11690 : -1)
11691
11692 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
11693
11694 static void
11695 loop_dump_aux (const struct loop *loop, FILE *file,
11696 int verbose ATTRIBUTE_UNUSED)
11697 {
11698 rtx label;
11699
11700 if (! loop || ! file || !BB_HEAD (loop->first))
11701 return;
11702
11703 /* Print diagnostics to compare our concept of a loop with
11704 what the loop notes say. */
11705 if (! PREV_INSN (BB_HEAD (loop->first))
11706 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11707 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11708 != NOTE_INSN_LOOP_BEG)
11709 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11710 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11711 if (! NEXT_INSN (BB_END (loop->last))
11712 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11713 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11714 != NOTE_INSN_LOOP_END)
11715 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11716 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11717
11718 if (loop->start)
11719 {
11720 fprintf (file,
11721 ";; start %d (%d), end %d (%d)\n",
11722 LOOP_BLOCK_NUM (loop->start),
11723 LOOP_INSN_UID (loop->start),
11724 LOOP_BLOCK_NUM (loop->end),
11725 LOOP_INSN_UID (loop->end));
11726 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11727 LOOP_BLOCK_NUM (loop->top),
11728 LOOP_INSN_UID (loop->top),
11729 LOOP_BLOCK_NUM (loop->scan_start),
11730 LOOP_INSN_UID (loop->scan_start));
11731 fprintf (file, ";; exit_count %d", loop->exit_count);
11732 if (loop->exit_count)
11733 {
11734 fputs (", labels:", file);
11735 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11736 {
11737 fprintf (file, " %d ",
11738 LOOP_INSN_UID (XEXP (label, 0)));
11739 }
11740 }
11741 fputs ("\n", file);
11742 }
11743 }
11744
11745 /* Call this function from the debugger to dump LOOP. */
11746
11747 void
11748 debug_loop (const struct loop *loop)
11749 {
11750 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11751 }
11752
11753 /* Call this function from the debugger to dump LOOPS. */
11754
11755 void
11756 debug_loops (const struct loops *loops)
11757 {
11758 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
11759 }
11760 \f
11761 static bool
11762 gate_handle_loop_optimize (void)
11763 {
11764 return (optimize > 0 && flag_loop_optimize);
11765 }
11766
11767 /* Move constant computations out of loops. */
11768 static void
11769 rest_of_handle_loop_optimize (void)
11770 {
11771 int do_prefetch;
11772
11773 /* CFG is no longer maintained up-to-date. */
11774 free_bb_for_insn ();
11775 profile_status = PROFILE_ABSENT;
11776
11777 do_prefetch = flag_prefetch_loop_arrays ? LOOP_PREFETCH : 0;
11778
11779 if (flag_rerun_loop_opt)
11780 {
11781 cleanup_barriers ();
11782
11783 /* We only want to perform unrolling once. */
11784 loop_optimize (get_insns (), dump_file, 0);
11785
11786 /* The first call to loop_optimize makes some instructions
11787 trivially dead. We delete those instructions now in the
11788 hope that doing so will make the heuristics in loop work
11789 better and possibly speed up compilation. */
11790 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11791
11792 /* The regscan pass is currently necessary as the alias
11793 analysis code depends on this information. */
11794 reg_scan (get_insns (), max_reg_num ());
11795 }
11796 cleanup_barriers ();
11797 loop_optimize (get_insns (), dump_file, do_prefetch);
11798
11799 /* Loop can create trivially dead instructions. */
11800 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11801 find_basic_blocks (get_insns ());
11802 }
11803
11804 struct tree_opt_pass pass_loop_optimize =
11805 {
11806 "old-loop", /* name */
11807 gate_handle_loop_optimize, /* gate */
11808 rest_of_handle_loop_optimize, /* execute */
11809 NULL, /* sub */
11810 NULL, /* next */
11811 0, /* static_pass_number */
11812 TV_LOOP, /* tv_id */
11813 0, /* properties_required */
11814 0, /* properties_provided */
11815 0, /* properties_destroyed */
11816 0, /* todo_flags_start */
11817 TODO_dump_func |
11818 TODO_ggc_collect, /* todo_flags_finish */
11819 'L' /* letter */
11820 };
11821
11822