gcc/loop.c
/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set within
   a loop only by incrementing or decrementing their value.  General induction
   variables (GIVs) are pseudo registers whose value is a linear function
   of a basic induction variable.  BIVs are recognized by
   `basic_induction_var'; GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is applied
   to the general induction variables, and induction variable elimination
   is applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the entire
   register once before the loop and merely copy the low part within the
   loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
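
/* As an illustrative sketch (the loop below is invented for this comment,
   not taken from any testcase), consider:

       for (i = 0; i < n; i++)
         a[i] = 0;

   `i' is a BIV: it is set only by incrementing it.  The address
   `a + i * 4' (assuming 4-byte elements) is a GIV: a linear function
   of `i'.  Strength reduction replaces the implied multiply with an
   add by maintaining a pointer that is bumped by 4 each iteration,
   and induction variable elimination may then rewrite the exit test
   in terms of that pointer so that `i' can be deleted entirely.  */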

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
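
/* With the default values above, at most PREFETCH_BLOCKS_BEFORE_LOOP_MAX
   * PREFETCH_BLOCK = 6 * 32 = 192 bytes would be prefetched ahead of such
   a loop, while a loop needing fewer than 2 blocks is not considered
   worth prefetching for at all.  */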

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
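
/* The default of 220 corresponds to a density threshold of 220/256,
   i.e. roughly 86%.  */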

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)


/* Vector mapping INSN_UIDs to luids.
   Luids are like uids, but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);
\f
/* Benefit penalty, if a giv is not replaceable, i.e. if we must emit an insn
   to copy the value of the strength-reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
\f
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
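
/* For example, if the insn stream carries insns with uids 7, 3 and 9 in
   that order (uids need not be ordered), compute_luids hands them luids
   PREV_LUID + 1, + 2 and + 3; a line-number note between two of them gets
   the same luid as the insn before it, so compiling with -g does not
   change luid distances.  */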
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
\f
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
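
/* For a loop entered at the bottom, the effect is that a scan starting at
   LOOP->SCAN_START runs forward to LOOP->END, wraps around to LOOP->TOP,
   and stops on reaching LOOP->SCAN_START again, so every insn in the loop
   is visited exactly once in execution order.  */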

/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM
   when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
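
/* Schematically, the entered-at-the-bottom shape being detected is:

       NOTE_INSN_LOOP_BEG
       jump L2
   L1:				<- will become LOOP->TOP
       ...loop body...
   L2:				<- will become LOOP->SCAN_START
       test; conditional jump to L1
       NOTE_INSN_LOOP_END

   (an illustrative sketch; the label names are invented here).  */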

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
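  /* For instance, with 28 allocatable non-fixed registers (a hypothetical
     figure), a loop without calls gets a threshold of 2 * (1 + 28) = 58,
     while one containing calls gets only 1 * (1 + 28) = 29.  */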

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning this isn't really a loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (GET_CODE (p) == INSN)
	{
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && GET_CODE (XEXP (x, 0)) == MEM)
			dependencies = find_regs_nested (dependencies,
							 XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && GET_CODE (NEXT_INSN (p)) == INSN
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads has its last use
     in another conditionally movable insn.  If so, record that the second
     insn "forces" the first one, since the second can be moved only if
     the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */

  if (!optimize_size
      || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
	 registers.  */
      if (max_reg_num () > regs->num)
	{
	  loop_regs_scan (loop, 0);
	  for (update_start = loop_start;
	       PREV_INSN (update_start)
		 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
	       update_start = PREV_INSN (update_start))
	    ;
	  update_end = NEXT_INSN (loop_end);

	  reg_scan_update (update_start, update_end, loop_max_reg);
	  loop_max_reg = max_reg_num ();
	}
    }

  /* Any candidate whose count is still negative was not moved.  Change
     regs->array[I].set_in_loop to indicate that those regs are not
     actually invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information was needed above by strength reduction;
     it can be freed now.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (rtx insn, rtx equiv)
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (rtx insn, rtx reg)
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (rtx last)
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a library
				   routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (rtx insn, int count)
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg that it loads has its last use
   in another conditionally movable insn.  If so, record that the second
   insn "forces" the first one, since the second can be moved only if
   the first is.  */

static void
force_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.
	   Likewise for insns already forced by the first insn.  */
1476 if (m != 0)
1477 {
1478 struct movable *m2;
1479
1480 m->forces = m1;
1481 for (m2 = m1; m2; m2 = m2->forces)
1482 {
1483 m2->lifetime += m->lifetime;
1484 m2->savings += m->savings;
1485 }
1486 }
1487 }
1488 }
1489 \f
1490 /* Find invariant expressions that are equal and can be combined into
1491 one register. */
1492
1493 static void
1494 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1495 {
1496 struct movable *m;
1497 char *matched_regs = xmalloc (regs->num);
1498 enum machine_mode mode;
1499
1500 /* Regs that are set more than once are not allowed to match
1501 or be matched. I'm no longer sure why not. */
1502 /* Only pseudo registers are allowed to match or be matched,
1503 since move_movables does not validate the change. */
1504 /* Perhaps testing m->consec_sets would be more appropriate here? */
1505
1506 for (m = movables->head; m; m = m->next)
1507 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1508 && m->regno >= FIRST_PSEUDO_REGISTER
1509 && !m->insert_temp
1510 && !m->partial)
1511 {
1512 struct movable *m1;
1513 int regno = m->regno;
1514
1515 memset (matched_regs, 0, regs->num);
1516 matched_regs[regno] = 1;
1517
1518 /* We want later insns to match the first one. Don't make the first
1519 one match any later ones. So start this loop at m->next. */
1520 for (m1 = m->next; m1; m1 = m1->next)
1521 if (m != m1 && m1->match == 0
1522 && !m1->insert_temp
1523 && regs->array[m1->regno].n_times_set == 1
1524 && m1->regno >= FIRST_PSEUDO_REGISTER
1525 /* A reg used outside the loop mustn't be eliminated. */
1526 && !m1->global
1527 /* A reg used for zero-extending mustn't be eliminated. */
1528 && !m1->partial
1529 && (matched_regs[m1->regno]
1530 ||
1531 (
1532 /* Can combine regs with different modes loaded from the
1533 same constant only if the modes are the same or
1534 if both are integer modes with M wider or the same
1535 width as M1. The check for integer is redundant, but
1536 safe, since the only case of differing destination
1537 modes with equal sources is when both sources are
1538 VOIDmode, i.e., CONST_INT. */
1539 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1540 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1541 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1542 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1543 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1544 /* See if the source of M1 says it matches M. */
1545 && ((REG_P (m1->set_src)
1546 && matched_regs[REGNO (m1->set_src)])
1547 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1548 movables, regs))))
1549 && ((m->dependencies == m1->dependencies)
1550 || rtx_equal_p (m->dependencies, m1->dependencies)))
1551 {
1552 m->lifetime += m1->lifetime;
1553 m->savings += m1->savings;
1554 m1->done = 1;
1555 m1->match = m;
1556 matched_regs[m1->regno] = 1;
1557 }
1558 }
1559
1560 /* Now combine the regs used for zero-extension.
1561 This can be done for those not marked `global'
1562 provided their lives don't overlap. */
1563
1564 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1565 mode = GET_MODE_WIDER_MODE (mode))
1566 {
1567 struct movable *m0 = 0;
1568
1569 /* Combine all the registers for extension from mode MODE.
1570 Don't combine any that are used outside this loop. */
1571 for (m = movables->head; m; m = m->next)
1572 if (m->partial && ! m->global
1573 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1574 {
1575 struct movable *m1;
1576
1577 int first = REGNO_FIRST_LUID (m->regno);
1578 int last = REGNO_LAST_LUID (m->regno);
1579
1580 if (m0 == 0)
1581 {
1582 /* First one: don't check for overlap, just record it. */
1583 m0 = m;
1584 continue;
1585 }
1586
1587 /* Make sure they extend to the same mode.
1588 (Almost always true.) */
1589 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1590 continue;
1591
1592 /* We already have one: check for overlap with those
1593 already combined together. */
1594 for (m1 = movables->head; m1 != m; m1 = m1->next)
1595 if (m1 == m0 || (m1->partial && m1->match == m0))
1596 if (! (REGNO_FIRST_LUID (m1->regno) > last
1597 || REGNO_LAST_LUID (m1->regno) < first))
1598 goto overlap;
1599
1600 /* No overlap: we can combine this with the others. */
1601 m0->lifetime += m->lifetime;
1602 m0->savings += m->savings;
1603 m->done = 1;
1604 m->match = m0;
1605
1606 overlap:
1607 ;
1608 }
1609 }
1610
1611 /* Clean up. */
1612 free (matched_regs);
1613 }
1614
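/* A minimal sketch (not part of the pass; all names are hypothetical)
   of the combine bookkeeping above: when a later movable duplicates an
   earlier one, it is retired by pointing its `match' field at the
   survivor and folding its cost estimates into it.  */
#if 0
struct toy_movable
{
  struct toy_movable *match;	/* Earlier movable this one duplicates.  */
  int lifetime, savings, done;
};

/* Retire DUP in favor of KEEP, as combine_movables does above.  */
static void
toy_combine (struct toy_movable *keep, struct toy_movable *dup)
{
  keep->lifetime += dup->lifetime;
  keep->savings += dup->savings;
  dup->done = 1;
  dup->match = keep;
}
#endif
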
1615 /* Returns the number of movable instructions in LOOP that were not
1616 moved outside the loop. */
1617
1618 static int
1619 num_unmoved_movables (const struct loop *loop)
1620 {
1621 int num = 0;
1622 struct movable *m;
1623
1624 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1625 if (!m->done)
1626 ++num;
1627
1628 return num;
1629 }
1630
1631 \f
1632 /* Return 1 if regs X and Y will become the same if moved. */
1633
1634 static int
1635 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1636 {
1637 unsigned int xn = REGNO (x);
1638 unsigned int yn = REGNO (y);
1639 struct movable *mx, *my;
1640
1641 for (mx = movables->head; mx; mx = mx->next)
1642 if (mx->regno == xn)
1643 break;
1644
1645 for (my = movables->head; my; my = my->next)
1646 if (my->regno == yn)
1647 break;
1648
1649 return (mx && my
1650 && ((mx->match == my->match && mx->match != 0)
1651 || mx->match == my
1652 || mx == my->match));
1653 }
1654
1655 /* Return 1 if X and Y are identical-looking rtx's.
1656 This is the Lisp function EQUAL for rtx arguments.
1657
1658 If two registers are matching movables or a movable register and an
1659 equivalent constant, consider them equal. */
1660
1661 static int
1662 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1663 struct loop_regs *regs)
1664 {
1665 int i;
1666 int j;
1667 struct movable *m;
1668 enum rtx_code code;
1669 const char *fmt;
1670
1671 if (x == y)
1672 return 1;
1673 if (x == 0 || y == 0)
1674 return 0;
1675
1676 code = GET_CODE (x);
1677
1678 /* If we have a register and a constant, they may sometimes be
1679 equal. */
1680 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
1681 && CONSTANT_P (y))
1682 {
1683 for (m = movables->head; m; m = m->next)
1684 if (m->move_insn && m->regno == REGNO (x)
1685 && rtx_equal_p (m->set_src, y))
1686 return 1;
1687 }
1688 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
1689 && CONSTANT_P (x))
1690 {
1691 for (m = movables->head; m; m = m->next)
1692 if (m->move_insn && m->regno == REGNO (y)
1693 && rtx_equal_p (m->set_src, x))
1694 return 1;
1695 }
1696
1697 /* Otherwise, rtx's of different codes cannot be equal. */
1698 if (code != GET_CODE (y))
1699 return 0;
1700
1701 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1702 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1703
1704 if (GET_MODE (x) != GET_MODE (y))
1705 return 0;
1706
1707 /* These three types of rtx's can be compared nonrecursively. */
1708 if (code == REG)
1709 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1710
1711 if (code == LABEL_REF)
1712 return XEXP (x, 0) == XEXP (y, 0);
1713 if (code == SYMBOL_REF)
1714 return XSTR (x, 0) == XSTR (y, 0);
1715
1716   /* Compare the elements.  If any pair of corresponding elements
1717      fails to match, return 0 for the whole thing.  */
1718
1719 fmt = GET_RTX_FORMAT (code);
1720 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1721 {
1722 switch (fmt[i])
1723 {
1724 case 'w':
1725 if (XWINT (x, i) != XWINT (y, i))
1726 return 0;
1727 break;
1728
1729 case 'i':
1730 if (XINT (x, i) != XINT (y, i))
1731 return 0;
1732 break;
1733
1734 case 'E':
1735 /* Two vectors must have the same length. */
1736 if (XVECLEN (x, i) != XVECLEN (y, i))
1737 return 0;
1738
1739 /* And the corresponding elements must match. */
1740 for (j = 0; j < XVECLEN (x, i); j++)
1741 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1742 movables, regs) == 0)
1743 return 0;
1744 break;
1745
1746 case 'e':
1747 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1748 == 0)
1749 return 0;
1750 break;
1751
1752 case 's':
1753 if (strcmp (XSTR (x, i), XSTR (y, i)))
1754 return 0;
1755 break;
1756
1757 case 'u':
1758 /* These are just backpointers, so they don't matter. */
1759 break;
1760
1761 case '0':
1762 break;
1763
1764 /* It is believed that rtx's at this level will never
1765 contain anything but integers and other rtx's,
1766 except for within LABEL_REFs and SYMBOL_REFs. */
1767 default:
1768 abort ();
1769 }
1770 }
1771 return 1;
1772 }
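
/* The format-string walk used above generalizes to any rtx traversal.
   A hedged sketch of the same dispatch pattern, using only standard
   rtl.h accessors; `toy_walk' is hypothetical and unused.  */
#if 0
/* Visit every sub-rtx of X, dispatching on the per-code format string
   exactly as rtx_equal_for_loop_p does: 'e' is a sub-rtx, 'E' is a
   vector of sub-rtx's; other slots hold no rtx.  */
static void
toy_walk (rtx x)
{
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int i, j;

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    if (fmt[i] == 'e' && XEXP (x, i) != 0)
      toy_walk (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	toy_walk (XVECEXP (x, i, j));
}
#endif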
1773 \f
1774 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1775 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1776 references is incremented once for each added note. */
1777
1778 static void
1779 add_label_notes (rtx x, rtx insns)
1780 {
1781 enum rtx_code code = GET_CODE (x);
1782 int i, j;
1783 const char *fmt;
1784 rtx insn;
1785
1786 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1787 {
1788 /* This code used to ignore labels that referred to dispatch tables to
1789 avoid flow generating (slightly) worse code.
1790
1791 We no longer ignore such label references (see LABEL_REF handling in
1792 mark_jump_label for additional information). */
1793 for (insn = insns; insn; insn = NEXT_INSN (insn))
1794 if (reg_mentioned_p (XEXP (x, 0), insn))
1795 {
1796 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1797 REG_NOTES (insn));
1798 if (LABEL_P (XEXP (x, 0)))
1799 LABEL_NUSES (XEXP (x, 0))++;
1800 }
1801 }
1802
1803 fmt = GET_RTX_FORMAT (code);
1804 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1805 {
1806 if (fmt[i] == 'e')
1807 add_label_notes (XEXP (x, i), insns);
1808 else if (fmt[i] == 'E')
1809 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1810 add_label_notes (XVECEXP (x, i, j), insns);
1811 }
1812 }
1813 \f
1814 /* Scan MOVABLES, and move the insns that deserve to be moved.
1815 If two matching movables are combined, replace one reg with the
1816 other throughout. */
1817
1818 static void
1819 move_movables (struct loop *loop, struct loop_movables *movables,
1820 int threshold, int insn_count)
1821 {
1822 struct loop_regs *regs = LOOP_REGS (loop);
1823 int nregs = regs->num;
1824 rtx new_start = 0;
1825 struct movable *m;
1826 rtx p;
1827 rtx loop_start = loop->start;
1828 rtx loop_end = loop->end;
1829 /* Map of pseudo-register replacements to handle combining
1830 when we move several insns that load the same value
1831 into different pseudo-registers. */
1832 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
1833 char *already_moved = xcalloc (nregs, sizeof (char));
1834
1835 for (m = movables->head; m; m = m->next)
1836 {
1837 /* Describe this movable insn. */
1838
1839 if (loop_dump_stream)
1840 {
1841 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1842 INSN_UID (m->insn), m->regno, m->lifetime);
1843 if (m->consec > 0)
1844 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1845 if (m->cond)
1846 fprintf (loop_dump_stream, "cond ");
1847 if (m->force)
1848 fprintf (loop_dump_stream, "force ");
1849 if (m->global)
1850 fprintf (loop_dump_stream, "global ");
1851 if (m->done)
1852 fprintf (loop_dump_stream, "done ");
1853 if (m->move_insn)
1854 fprintf (loop_dump_stream, "move-insn ");
1855 if (m->match)
1856 fprintf (loop_dump_stream, "matches %d ",
1857 INSN_UID (m->match->insn));
1858 if (m->forces)
1859 fprintf (loop_dump_stream, "forces %d ",
1860 INSN_UID (m->forces->insn));
1861 }
1862
1863 /* Ignore the insn if it's already done (it matched something else).
1864 Otherwise, see if it is now safe to move. */
1865
1866 if (!m->done
1867 && (! m->cond
1868 || (1 == loop_invariant_p (loop, m->set_src)
1869 && (m->dependencies == 0
1870 || 1 == loop_invariant_p (loop, m->dependencies))
1871 && (m->consec == 0
1872 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1873 m->consec + 1,
1874 m->insn))))
1875 && (! m->forces || m->forces->done))
1876 {
1877 int regno;
1878 rtx p;
1879 int savings = m->savings;
1880
1881 /* We have an insn that is safe to move.
1882 Compute its desirability. */
1883
1884 p = m->insn;
1885 regno = m->regno;
1886
1887 if (loop_dump_stream)
1888 fprintf (loop_dump_stream, "savings %d ", savings);
1889
1890 if (regs->array[regno].moved_once && loop_dump_stream)
1891 fprintf (loop_dump_stream, "halved since already moved ");
1892
1893 /* An insn MUST be moved if we already moved something else
1894 which is safe only if this one is moved too: that is,
1895 if already_moved[REGNO] is nonzero. */
1896
1897 /* An insn is desirable to move if the new lifetime of the
1898 register is no more than THRESHOLD times the old lifetime.
1899 If it's not desirable, it means the loop is so big
1900 that moving won't speed things up much,
1901 and it is liable to make register usage worse. */
1902
1903 /* It is also desirable to move if it can be moved at no
1904 extra cost because something else was already moved. */
1905
1906 if (already_moved[regno]
1907 || flag_move_all_movables
1908 || (threshold * savings * m->lifetime) >=
1909 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1910 || (m->forces && m->forces->done
1911 && regs->array[m->forces->regno].n_times_set == 1))
1912 {
1913 int count;
1914 struct movable *m1;
1915 rtx first = NULL_RTX;
1916 rtx newreg = NULL_RTX;
1917
1918 if (m->insert_temp)
1919 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1920
1921 /* Now move the insns that set the reg. */
1922
1923 if (m->partial && m->match)
1924 {
1925 rtx newpat, i1;
1926 rtx r1, r2;
1927 /* Find the end of this chain of matching regs.
1928 Thus, we load each reg in the chain from that one reg.
1929 And that reg is loaded with 0 directly,
1930 since it has ->match == 0. */
1931 for (m1 = m; m1->match; m1 = m1->match);
1932 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1933 SET_DEST (PATTERN (m1->insn)));
1934 i1 = loop_insn_hoist (loop, newpat);
1935
1936 /* Mark the moved, invariant reg as being allowed to
1937 share a hard reg with the other matching invariant. */
1938 REG_NOTES (i1) = REG_NOTES (m->insn);
1939 r1 = SET_DEST (PATTERN (m->insn));
1940 r2 = SET_DEST (PATTERN (m1->insn));
1941 regs_may_share
1942 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1943 gen_rtx_EXPR_LIST (VOIDmode, r2,
1944 regs_may_share));
1945 delete_insn (m->insn);
1946
1947 if (new_start == 0)
1948 new_start = i1;
1949
1950 if (loop_dump_stream)
1951 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1952 }
1953 /* If we are to re-generate the item being moved with a
1954 new move insn, first delete what we have and then emit
1955 the move insn before the loop. */
1956 else if (m->move_insn)
1957 {
1958 rtx i1, temp, seq;
1959
1960 for (count = m->consec; count >= 0; count--)
1961 {
1962 /* If this is the first insn of a library call sequence,
1963 something is very wrong. */
1964 if (GET_CODE (p) != NOTE
1965 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1966 abort ();
1967
1968 /* If this is the last insn of a libcall sequence, then
1969 delete every insn in the sequence except the last.
1970 The last insn is handled in the normal manner. */
1971 if (GET_CODE (p) != NOTE
1972 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1973 {
1974 temp = XEXP (temp, 0);
1975 while (temp != p)
1976 temp = delete_insn (temp);
1977 }
1978
1979 temp = p;
1980 p = delete_insn (p);
1981
1982 /* simplify_giv_expr expects that it can walk the insns
1983 at m->insn forwards and see this old sequence we are
1984 tossing here. delete_insn does preserve the next
1985 pointers, but when we skip over a NOTE we must fix
1986 it up. Otherwise that code walks into the non-deleted
1987 insn stream. */
1988 while (p && GET_CODE (p) == NOTE)
1989 p = NEXT_INSN (temp) = NEXT_INSN (p);
1990
1991 if (m->insert_temp)
1992 {
1993 /* Replace the original insn with a move from
1994 our newly created temp. */
1995 start_sequence ();
1996 emit_move_insn (m->set_dest, newreg);
1997 seq = get_insns ();
1998 end_sequence ();
1999 emit_insn_before (seq, p);
2000 }
2001 }
2002
2003 start_sequence ();
2004 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2005 m->set_src);
2006 seq = get_insns ();
2007 end_sequence ();
2008
2009 add_label_notes (m->set_src, seq);
2010
2011 i1 = loop_insn_hoist (loop, seq);
2012 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2013 set_unique_reg_note (i1,
2014 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2015 m->set_src);
2016
2017 if (loop_dump_stream)
2018 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2019
2020 /* The more regs we move, the less we like moving them. */
2021 threshold -= 3;
2022 }
2023 else
2024 {
2025 for (count = m->consec; count >= 0; count--)
2026 {
2027 rtx i1, temp;
2028
2029 /* If first insn of libcall sequence, skip to end. */
2030 /* Do this at start of loop, since p is guaranteed to
2031 be an insn here. */
2032 if (GET_CODE (p) != NOTE
2033 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2034 p = XEXP (temp, 0);
2035
2036 /* If last insn of libcall sequence, move all
2037 insns except the last before the loop. The last
2038 insn is handled in the normal manner. */
2039 if (GET_CODE (p) != NOTE
2040 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2041 {
2042 rtx fn_address = 0;
2043 rtx fn_reg = 0;
2044 rtx fn_address_insn = 0;
2045
2046 first = 0;
2047 for (temp = XEXP (temp, 0); temp != p;
2048 temp = NEXT_INSN (temp))
2049 {
2050 rtx body;
2051 rtx n;
2052 rtx next;
2053
2054 if (GET_CODE (temp) == NOTE)
2055 continue;
2056
2057 body = PATTERN (temp);
2058
2059 /* Find the next insn after TEMP,
2060 not counting USE or NOTE insns. */
2061 for (next = NEXT_INSN (temp); next != p;
2062 next = NEXT_INSN (next))
2063 if (! (GET_CODE (next) == INSN
2064 && GET_CODE (PATTERN (next)) == USE)
2065 && GET_CODE (next) != NOTE)
2066 break;
2067
2068 /* If that is the call, this may be the insn
2069 that loads the function address.
2070
2071 Extract the function address from the insn
2072 that loads it into a register.
2073 If this insn was cse'd, we get incorrect code.
2074
2075 So emit a new move insn that copies the
2076 function address into the register that the
2077 call insn will use. flow.c will delete any
2078 redundant stores that we have created. */
2079 if (GET_CODE (next) == CALL_INSN
2080 && GET_CODE (body) == SET
2081 && REG_P (SET_DEST (body))
2082 && (n = find_reg_note (temp, REG_EQUAL,
2083 NULL_RTX)))
2084 {
2085 fn_reg = SET_SRC (body);
2086 if (!REG_P (fn_reg))
2087 fn_reg = SET_DEST (body);
2088 fn_address = XEXP (n, 0);
2089 fn_address_insn = temp;
2090 }
2091 /* We have the call insn.
2092 If it uses the register we suspect it might,
2093 load it with the correct address directly. */
2094 if (GET_CODE (temp) == CALL_INSN
2095 && fn_address != 0
2096 && reg_referenced_p (fn_reg, body))
2097 loop_insn_emit_after (loop, 0, fn_address_insn,
2098 gen_move_insn
2099 (fn_reg, fn_address));
2100
2101 if (GET_CODE (temp) == CALL_INSN)
2102 {
2103 i1 = loop_call_insn_hoist (loop, body);
2104 /* Because the USAGE information potentially
2105 			      contains objects other than hard registers,
2106 we need to copy it. */
2107 if (CALL_INSN_FUNCTION_USAGE (temp))
2108 CALL_INSN_FUNCTION_USAGE (i1)
2109 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2110 }
2111 else
2112 i1 = loop_insn_hoist (loop, body);
2113 if (first == 0)
2114 first = i1;
2115 if (temp == fn_address_insn)
2116 fn_address_insn = i1;
2117 REG_NOTES (i1) = REG_NOTES (temp);
2118 REG_NOTES (temp) = NULL;
2119 delete_insn (temp);
2120 }
2121 if (new_start == 0)
2122 new_start = first;
2123 }
2124 if (m->savemode != VOIDmode)
2125 {
2126 /* P sets REG to zero; but we should clear only
2127 the bits that are not covered by the mode
2128 m->savemode. */
2129 rtx reg = m->set_dest;
2130 rtx sequence;
2131 rtx tem;
2132
2133 start_sequence ();
2134 tem = expand_simple_binop
2135 (GET_MODE (reg), AND, reg,
2136 GEN_INT ((((HOST_WIDE_INT) 1
2137 << GET_MODE_BITSIZE (m->savemode)))
2138 - 1),
2139 reg, 1, OPTAB_LIB_WIDEN);
2140 if (tem == 0)
2141 abort ();
2142 if (tem != reg)
2143 emit_move_insn (reg, tem);
2144 sequence = get_insns ();
2145 end_sequence ();
2146 i1 = loop_insn_hoist (loop, sequence);
2147 }
2148 else if (GET_CODE (p) == CALL_INSN)
2149 {
2150 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2151 /* Because the USAGE information potentially
2152 			     contains objects other than hard registers,
2153 we need to copy it. */
2154 if (CALL_INSN_FUNCTION_USAGE (p))
2155 CALL_INSN_FUNCTION_USAGE (i1)
2156 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2157 }
2158 else if (count == m->consec && m->move_insn_first)
2159 {
2160 rtx seq;
2161 /* The SET_SRC might not be invariant, so we must
2162 use the REG_EQUAL note. */
2163 start_sequence ();
2164 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2165 m->set_src);
2166 seq = get_insns ();
2167 end_sequence ();
2168
2169 add_label_notes (m->set_src, seq);
2170
2171 i1 = loop_insn_hoist (loop, seq);
2172 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2173 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2174 : REG_EQUAL, m->set_src);
2175 }
2176 else if (m->insert_temp)
2177 {
2178 rtx *reg_map2 = xcalloc (REGNO (newreg),
2179 sizeof(rtx));
2180 reg_map2 [m->regno] = newreg;
2181
2182 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2183 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2184 free (reg_map2);
2185 }
2186 else
2187 i1 = loop_insn_hoist (loop, PATTERN (p));
2188
2189 if (REG_NOTES (i1) == 0)
2190 {
2191 REG_NOTES (i1) = REG_NOTES (p);
2192 REG_NOTES (p) = NULL;
2193
2194 /* If there is a REG_EQUAL note present whose value
2195 is not loop invariant, then delete it, since it
2196 may cause problems with later optimization passes.
2197 			 It is possible for cse to create notes
2198 			 like this as a result of record_jump_cond.  */
2199
2200 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2201 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2202 remove_note (i1, temp);
2203 }
2204
2205 if (new_start == 0)
2206 new_start = i1;
2207
2208 if (loop_dump_stream)
2209 fprintf (loop_dump_stream, " moved to %d",
2210 INSN_UID (i1));
2211
2212 /* If library call, now fix the REG_NOTES that contain
2213 insn pointers, namely REG_LIBCALL on FIRST
2214 and REG_RETVAL on I1. */
2215 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2216 {
2217 XEXP (temp, 0) = first;
2218 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2219 XEXP (temp, 0) = i1;
2220 }
2221
2222 temp = p;
2223 delete_insn (p);
2224 p = NEXT_INSN (p);
2225
2226 /* simplify_giv_expr expects that it can walk the insns
2227 at m->insn forwards and see this old sequence we are
2228 tossing here. delete_insn does preserve the next
2229 pointers, but when we skip over a NOTE we must fix
2230 it up. Otherwise that code walks into the non-deleted
2231 insn stream. */
2232 while (p && GET_CODE (p) == NOTE)
2233 p = NEXT_INSN (temp) = NEXT_INSN (p);
2234
2235 if (m->insert_temp)
2236 {
2237 rtx seq;
2238 /* Replace the original insn with a move from
2239 our newly created temp. */
2240 start_sequence ();
2241 emit_move_insn (m->set_dest, newreg);
2242 seq = get_insns ();
2243 end_sequence ();
2244 emit_insn_before (seq, p);
2245 }
2246 }
2247
2248 /* The more regs we move, the less we like moving them. */
2249 threshold -= 3;
2250 }
2251
2252 m->done = 1;
2253
2254 if (!m->insert_temp)
2255 {
2256 /* Any other movable that loads the same register
2257 MUST be moved. */
2258 already_moved[regno] = 1;
2259
2260 /* This reg has been moved out of one loop. */
2261 regs->array[regno].moved_once = 1;
2262
2263 /* The reg set here is now invariant. */
2264 if (! m->partial)
2265 {
2266 int i;
2267 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2268 regs->array[regno+i].set_in_loop = 0;
2269 }
2270
2271 /* Change the length-of-life info for the register
2272 to say it lives at least the full length of this loop.
2273 This will help guide optimizations in outer loops. */
2274
2275 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2276 /* This is the old insn before all the moved insns.
2277 We can't use the moved insn because it is out of range
2278 in uid_luid. Only the old insns have luids. */
2279 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2280 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2281 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2282 }
2283
2284 /* Combine with this moved insn any other matching movables. */
2285
2286 if (! m->partial)
2287 for (m1 = movables->head; m1; m1 = m1->next)
2288 if (m1->match == m)
2289 {
2290 rtx temp;
2291
2292 /* Schedule the reg loaded by M1
2293 		       for replacement so that it shares the reg of M.
2294 		       If the modes differ (only possible in restricted
2295 		       circumstances), make a SUBREG.
2296
2297 Note this assumes that the target dependent files
2298 treat REG and SUBREG equally, including within
2299 GO_IF_LEGITIMATE_ADDRESS and in all the
2300 predicates since we never verify that replacing the
2301 original register with a SUBREG results in a
2302 recognizable insn. */
2303 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2304 reg_map[m1->regno] = m->set_dest;
2305 else
2306 reg_map[m1->regno]
2307 = gen_lowpart_common (GET_MODE (m1->set_dest),
2308 m->set_dest);
2309
2310 /* Get rid of the matching insn
2311 and prevent further processing of it. */
2312 m1->done = 1;
2313
2314 /* If library call, delete all insns. */
2315 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2316 NULL_RTX)))
2317 delete_insn_chain (XEXP (temp, 0), m1->insn);
2318 else
2319 delete_insn (m1->insn);
2320
2321 /* Any other movable that loads the same register
2322 MUST be moved. */
2323 already_moved[m1->regno] = 1;
2324
2325 /* The reg merged here is now invariant,
2326 if the reg it matches is invariant. */
2327 if (! m->partial)
2328 {
2329 int i;
2330 for (i = 0;
2331 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2332 i++)
2333 regs->array[m1->regno+i].set_in_loop = 0;
2334 }
2335 }
2336 }
2337 else if (loop_dump_stream)
2338 fprintf (loop_dump_stream, "not desirable");
2339 }
2340 else if (loop_dump_stream && !m->match)
2341 fprintf (loop_dump_stream, "not safe");
2342
2343 if (loop_dump_stream)
2344 fprintf (loop_dump_stream, "\n");
2345 }
2346
2347 if (new_start == 0)
2348 new_start = loop_start;
2349
2350 /* Go through all the instructions in the loop, making
2351 all the register substitutions scheduled in REG_MAP. */
2352 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2353 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2354 || GET_CODE (p) == CALL_INSN)
2355 {
2356 replace_regs (PATTERN (p), reg_map, nregs, 0);
2357 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2358 INSN_CODE (p) = -1;
2359 }
2360
2361 /* Clean up. */
2362 free (reg_map);
2363 free (already_moved);
2364 }
2365
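/* The profitability test buried in move_movables above, isolated as a
   sketch (a hypothetical helper, not called by the pass).  THRESHOLD
   is decremented by 3 for each insn moved, so each later candidate
   must look proportionally better to qualify.  */
#if 0
static int
toy_desirable_p (int threshold, int savings, int lifetime,
		 int insn_count, int moved_once)
{
  /* Moving is worthwhile when the weighted savings cover the loop
     size; a reg already moved out of another loop counts double.  */
  return (threshold * savings * lifetime)
	 >= (moved_once ? insn_count * 2 : insn_count);
}
#endif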
2366
2367 static void
2368 loop_movables_add (struct loop_movables *movables, struct movable *m)
2369 {
2370 if (movables->head == 0)
2371 movables->head = m;
2372 else
2373 movables->last->next = m;
2374 movables->last = m;
2375 }
2376
2377
2378 static void
2379 loop_movables_free (struct loop_movables *movables)
2380 {
2381 struct movable *m;
2382 struct movable *m_next;
2383
2384 for (m = movables->head; m; m = m_next)
2385 {
2386 m_next = m->next;
2387 free (m);
2388 }
2389 }
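
/* Lifetime sketch for the movables list managed by the two helpers
   above (a hypothetical demo, not called anywhere): nodes are
   heap-allocated during the loop scan, appended in order, and released
   in one pass once move_movables is done.  */
#if 0
static void
toy_movables_demo (struct loop_movables *movables, struct movable *m)
{
  loop_movables_add (movables, m);	/* O(1) append via `last'.  */
  /* ... scan_loop builds the list, move_movables consumes it ...  */
  loop_movables_free (movables);	/* Frees every node.  */
}
#endif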
2390 \f
2391 #if 0
2392 /* Scan X and replace the address of any MEM in it with ADDR.
2393 REG is the address that MEM should have before the replacement. */
2394
2395 static void
2396 replace_call_address (rtx x, rtx reg, rtx addr)
2397 {
2398 enum rtx_code code;
2399 int i;
2400 const char *fmt;
2401
2402 if (x == 0)
2403 return;
2404 code = GET_CODE (x);
2405 switch (code)
2406 {
2407 case PC:
2408 case CC0:
2409 case CONST_INT:
2410 case CONST_DOUBLE:
2411 case CONST:
2412 case SYMBOL_REF:
2413 case LABEL_REF:
2414 case REG:
2415 return;
2416
2417 case SET:
2418 /* Short cut for very common case. */
2419 replace_call_address (XEXP (x, 1), reg, addr);
2420 return;
2421
2422 case CALL:
2423 /* Short cut for very common case. */
2424 replace_call_address (XEXP (x, 0), reg, addr);
2425 return;
2426
2427 case MEM:
2428 /* If this MEM uses a reg other than the one we expected,
2429 something is wrong. */
2430 if (XEXP (x, 0) != reg)
2431 abort ();
2432 XEXP (x, 0) = addr;
2433 return;
2434
2435 default:
2436 break;
2437 }
2438
2439 fmt = GET_RTX_FORMAT (code);
2440 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2441 {
2442 if (fmt[i] == 'e')
2443 replace_call_address (XEXP (x, i), reg, addr);
2444 else if (fmt[i] == 'E')
2445 {
2446 int j;
2447 for (j = 0; j < XVECLEN (x, i); j++)
2448 replace_call_address (XVECEXP (x, i, j), reg, addr);
2449 }
2450 }
2451 }
2452 #endif
2453 \f
2454 /* Return the number of memory refs to addresses that vary
2455 in the rtx X. */
2456
2457 static int
2458 count_nonfixed_reads (const struct loop *loop, rtx x)
2459 {
2460 enum rtx_code code;
2461 int i;
2462 const char *fmt;
2463 int value;
2464
2465 if (x == 0)
2466 return 0;
2467
2468 code = GET_CODE (x);
2469 switch (code)
2470 {
2471 case PC:
2472 case CC0:
2473 case CONST_INT:
2474 case CONST_DOUBLE:
2475 case CONST:
2476 case SYMBOL_REF:
2477 case LABEL_REF:
2478 case REG:
2479 return 0;
2480
2481 case MEM:
2482 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2483 + count_nonfixed_reads (loop, XEXP (x, 0)));
2484
2485 default:
2486 break;
2487 }
2488
2489 value = 0;
2490 fmt = GET_RTX_FORMAT (code);
2491 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2492 {
2493 if (fmt[i] == 'e')
2494 value += count_nonfixed_reads (loop, XEXP (x, i));
2495 if (fmt[i] == 'E')
2496 {
2497 int j;
2498 for (j = 0; j < XVECLEN (x, i); j++)
2499 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2500 }
2501 }
2502 return value;
2503 }
2504 \f
2505 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2506 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2507 `unknown_address_altered', `unknown_constant_address_altered', and
2508 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2509 list `store_mems' in LOOP. */
2510
2511 static void
2512 prescan_loop (struct loop *loop)
2513 {
2514 int level = 1;
2515 rtx insn;
2516 struct loop_info *loop_info = LOOP_INFO (loop);
2517 rtx start = loop->start;
2518 rtx end = loop->end;
2519 /* The label after END. Jumping here is just like falling off the
2520 end of the loop. We use next_nonnote_insn instead of next_label
2521 as a hedge against the (pathological) case where some actual insn
2522 might end up between the two. */
2523 rtx exit_target = next_nonnote_insn (end);
2524
2525 loop_info->has_indirect_jump = indirect_jump_in_function;
2526 loop_info->pre_header_has_call = 0;
2527 loop_info->has_call = 0;
2528 loop_info->has_nonconst_call = 0;
2529 loop_info->has_prefetch = 0;
2530 loop_info->has_volatile = 0;
2531 loop_info->has_tablejump = 0;
2532 loop_info->has_multiple_exit_targets = 0;
2533 loop->level = 1;
2534
2535 loop_info->unknown_address_altered = 0;
2536 loop_info->unknown_constant_address_altered = 0;
2537 loop_info->store_mems = NULL_RTX;
2538 loop_info->first_loop_store_insn = NULL_RTX;
2539 loop_info->mems_idx = 0;
2540 loop_info->num_mem_sets = 0;
2541 /* If loop opts run twice, this was set on 1st pass for 2nd. */
2542 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2543
2544 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2545 insn = PREV_INSN (insn))
2546 {
2547 if (GET_CODE (insn) == CALL_INSN)
2548 {
2549 loop_info->pre_header_has_call = 1;
2550 break;
2551 }
2552 }
2553
2554 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2555 insn = NEXT_INSN (insn))
2556 {
2557 switch (GET_CODE (insn))
2558 {
2559 case NOTE:
2560 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2561 {
2562 ++level;
2563 /* Count number of loops contained in this one. */
2564 loop->level++;
2565 }
2566 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2567 --level;
2568 break;
2569
2570 case CALL_INSN:
2571 if (! CONST_OR_PURE_CALL_P (insn))
2572 {
2573 loop_info->unknown_address_altered = 1;
2574 loop_info->has_nonconst_call = 1;
2575 }
2576 else if (pure_call_p (insn))
2577 loop_info->has_nonconst_call = 1;
2578 loop_info->has_call = 1;
2579 if (can_throw_internal (insn))
2580 loop_info->has_multiple_exit_targets = 1;
2581
2582 /* Calls initializing constant objects have CLOBBER of MEM /u in the
2583 attached FUNCTION_USAGE expression list, not accounted for by the
2584 code above. We should note these to avoid missing dependencies in
2585 later references. */
2586 {
2587 rtx fusage_entry;
2588
2589 for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn);
2590 fusage_entry; fusage_entry = XEXP (fusage_entry, 1))
2591 {
2592 rtx fusage = XEXP (fusage_entry, 0);
2593
2594 if (GET_CODE (fusage) == CLOBBER
2595 && GET_CODE (XEXP (fusage, 0)) == MEM
2596 && RTX_UNCHANGING_P (XEXP (fusage, 0)))
2597 {
2598 note_stores (fusage, note_addr_stored, loop_info);
2599 if (! loop_info->first_loop_store_insn
2600 && loop_info->store_mems)
2601 loop_info->first_loop_store_insn = insn;
2602 }
2603 }
2604 }
2605 break;
2606
2607 case JUMP_INSN:
2608 if (! loop_info->has_multiple_exit_targets)
2609 {
2610 rtx set = pc_set (insn);
2611
2612 if (set)
2613 {
2614 rtx src = SET_SRC (set);
2615 rtx label1, label2;
2616
2617 if (GET_CODE (src) == IF_THEN_ELSE)
2618 {
2619 label1 = XEXP (src, 1);
2620 label2 = XEXP (src, 2);
2621 }
2622 else
2623 {
2624 label1 = src;
2625 label2 = NULL_RTX;
2626 }
2627
2628 do
2629 {
2630 if (label1 && label1 != pc_rtx)
2631 {
2632 if (GET_CODE (label1) != LABEL_REF)
2633 {
2634 /* Something tricky. */
2635 loop_info->has_multiple_exit_targets = 1;
2636 break;
2637 }
2638 else if (XEXP (label1, 0) != exit_target
2639 && LABEL_OUTSIDE_LOOP_P (label1))
2640 {
2641 /* A jump outside the current loop. */
2642 loop_info->has_multiple_exit_targets = 1;
2643 break;
2644 }
2645 }
2646
2647 label1 = label2;
2648 label2 = NULL_RTX;
2649 }
2650 while (label1);
2651 }
2652 else
2653 {
2654 /* A return, or something tricky. */
2655 loop_info->has_multiple_exit_targets = 1;
2656 }
2657 }
2658 /* Fall through. */
2659
2660 case INSN:
2661 if (volatile_refs_p (PATTERN (insn)))
2662 loop_info->has_volatile = 1;
2663
2664 if (GET_CODE (insn) == JUMP_INSN
2665 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2666 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2667 loop_info->has_tablejump = 1;
2668
2669 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2670 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2671 loop_info->first_loop_store_insn = insn;
2672
2673 if (flag_non_call_exceptions && can_throw_internal (insn))
2674 loop_info->has_multiple_exit_targets = 1;
2675 break;
2676
2677 default:
2678 break;
2679 }
2680 }
2681
2682 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2683 if (/* An exception thrown by a called function might land us
2684 anywhere. */
2685 ! loop_info->has_nonconst_call
2686 /* We don't want loads for MEMs moved to a location before the
2687 one at which their stack memory becomes allocated. (Note
2688 that this is not a problem for malloc, etc., since those
2689 	 require actual function calls.)  */
2690 && ! current_function_calls_alloca
2691 /* There are ways to leave the loop other than falling off the
2692 end. */
2693 && ! loop_info->has_multiple_exit_targets)
2694 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2695 insn = NEXT_INSN (insn))
2696 for_each_rtx (&insn, insert_loop_mem, loop_info);
2697
2698 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2699 that loop_invariant_p and load_mems can use true_dependence
2700 to determine what is really clobbered. */
2701 if (loop_info->unknown_address_altered)
2702 {
2703 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2704
2705 loop_info->store_mems
2706 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2707 }
2708 if (loop_info->unknown_constant_address_altered)
2709 {
2710 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2711
2712 RTX_UNCHANGING_P (mem) = 1;
2713 loop_info->store_mems
2714 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2715 }
2716 }
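
/* A sketch of the wildcard-store idiom used at the end of prescan_loop
   (a hypothetical helper, not part of the pass): a BLKmode MEM at
   address 0 conflicts with every address under true_dependence, so
   pushing one onto store_mems means "assume all of memory was
   clobbered"; RTX_UNCHANGING_P narrows the wildcard to nominally
   read-only memory.  */
#if 0
static rtx
toy_wildcard_store (rtx store_mems, int constant_only)
{
  rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);

  if (constant_only)
    RTX_UNCHANGING_P (mem) = 1;
  return gen_rtx_EXPR_LIST (VOIDmode, mem, store_mems);
}
#endif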
2717 \f
2718 /* Invalidate all loops containing LABEL. */
2719
2720 static void
2721 invalidate_loops_containing_label (rtx label)
2722 {
2723 struct loop *loop;
2724 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2725 loop->invalid = 1;
2726 }
2727
2728 /* Scan the function looking for loops. Record the start and end of each loop.
2729 Also mark as invalid loops any loops that contain a setjmp or are branched
2730 to from outside the loop. */
2731
2732 static void
2733 find_and_verify_loops (rtx f, struct loops *loops)
2734 {
2735 rtx insn;
2736 rtx label;
2737 int num_loops;
2738 struct loop *current_loop;
2739 struct loop *next_loop;
2740 struct loop *loop;
2741
2742 num_loops = loops->num;
2743
2744 compute_luids (f, NULL_RTX, 0);
2745
2746 /* If there are jumps to undefined labels,
2747 treat them as jumps out of any/all loops.
2748 This also avoids writing past end of tables when there are no loops. */
2749 uid_loop[0] = NULL;
2750
2751 /* Find boundaries of loops, mark which loops are contained within
2752 loops, and invalidate loops that have setjmp. */
2753
2754 num_loops = 0;
2755 current_loop = NULL;
2756 for (insn = f; insn; insn = NEXT_INSN (insn))
2757 {
2758 if (GET_CODE (insn) == NOTE)
2759 switch (NOTE_LINE_NUMBER (insn))
2760 {
2761 case NOTE_INSN_LOOP_BEG:
2762 next_loop = loops->array + num_loops;
2763 next_loop->num = num_loops;
2764 num_loops++;
2765 next_loop->start = insn;
2766 next_loop->outer = current_loop;
2767 current_loop = next_loop;
2768 break;
2769
2770 case NOTE_INSN_LOOP_CONT:
2771 current_loop->cont = insn;
2772 break;
2773
2774 case NOTE_INSN_LOOP_VTOP:
2775 current_loop->vtop = insn;
2776 break;
2777
2778 case NOTE_INSN_LOOP_END:
2779 if (! current_loop)
2780 abort ();
2781
2782 current_loop->end = insn;
2783 current_loop = current_loop->outer;
2784 break;
2785
2786 default:
2787 break;
2788 }
2789
2790 if (GET_CODE (insn) == CALL_INSN
2791 && find_reg_note (insn, REG_SETJMP, NULL))
2792 {
2793 /* In this case, we must invalidate our current loop and any
2794 enclosing loop. */
2795 for (loop = current_loop; loop; loop = loop->outer)
2796 {
2797 loop->invalid = 1;
2798 if (loop_dump_stream)
2799 fprintf (loop_dump_stream,
2800 "\nLoop at %d ignored due to setjmp.\n",
2801 INSN_UID (loop->start));
2802 }
2803 }
2804
2805 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2806 enclosing loop, but this doesn't matter. */
2807 uid_loop[INSN_UID (insn)] = current_loop;
2808 }
2809
2810 /* Any loop containing a label used in an initializer must be invalidated,
2811 because it can be jumped into from anywhere. */
2812 for (label = forced_labels; label; label = XEXP (label, 1))
2813 invalidate_loops_containing_label (XEXP (label, 0));
2814
2815 /* Any loop containing a label used for an exception handler must be
2816 invalidated, because it can be jumped into from anywhere. */
2817 for_each_eh_label (invalidate_loops_containing_label);
2818
2819 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2820 loop that it is not contained within, that loop is marked invalid.
2821 If any INSN or CALL_INSN uses a label's address, then the loop containing
2822 that label is marked invalid, because it could be jumped into from
2823 anywhere.
2824
2825 Also look for blocks of code ending in an unconditional branch that
2826 exits the loop. If such a block is surrounded by a conditional
2827 branch around the block, move the block elsewhere (see below) and
2828 invert the jump to point to the code block. This may eliminate a
2829 label in our loop and will simplify processing by both us and a
2830 possible second cse pass. */
2831
2832 for (insn = f; insn; insn = NEXT_INSN (insn))
2833 if (INSN_P (insn))
2834 {
2835 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2836
2837 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2838 {
2839 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2840 if (note)
2841 invalidate_loops_containing_label (XEXP (note, 0));
2842 }
2843
2844 if (GET_CODE (insn) != JUMP_INSN)
2845 continue;
2846
2847 mark_loop_jump (PATTERN (insn), this_loop);
2848
2849 /* See if this is an unconditional branch outside the loop. */
2850 if (this_loop
2851 && (GET_CODE (PATTERN (insn)) == RETURN
2852 || (any_uncondjump_p (insn)
2853 && onlyjump_p (insn)
2854 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2855 != this_loop)))
2856 && get_max_uid () < max_uid_for_loop)
2857 {
2858 rtx p;
2859 rtx our_next = next_real_insn (insn);
2860 rtx last_insn_to_move = NEXT_INSN (insn);
2861 struct loop *dest_loop;
2862 struct loop *outer_loop = NULL;
2863
2864 /* Go backwards until we reach the start of the loop, a label,
2865 or a JUMP_INSN. */
2866 for (p = PREV_INSN (insn);
2867 GET_CODE (p) != CODE_LABEL
2868 && ! (GET_CODE (p) == NOTE
2869 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2870 && GET_CODE (p) != JUMP_INSN;
2871 p = PREV_INSN (p))
2872 ;
2873
2874 /* Check for the case where we have a jump to an inner nested
2875 loop, and do not perform the optimization in that case. */
2876
2877 if (JUMP_LABEL (insn))
2878 {
2879 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2880 if (dest_loop)
2881 {
2882 for (outer_loop = dest_loop; outer_loop;
2883 outer_loop = outer_loop->outer)
2884 if (outer_loop == this_loop)
2885 break;
2886 }
2887 }
2888
2889 /* Make sure that the target of P is within the current loop. */
2890
2891 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2892 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2893 outer_loop = this_loop;
2894
2895 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2896 we have a block of code to try to move.
2897
2898 We look backward and then forward from the target of INSN
2899 to find a BARRIER at the same loop depth as the target.
2900 If we find such a BARRIER, we make a new label for the start
2901 of the block, invert the jump in P and point it to that label,
2902 and move the block of code to the spot we found. */
2903
2904 if (! outer_loop
2905 && GET_CODE (p) == JUMP_INSN
2906 && JUMP_LABEL (p) != 0
2907 /* Just ignore jumps to labels that were never emitted.
2908 These always indicate compilation errors. */
2909 && INSN_UID (JUMP_LABEL (p)) != 0
2910 && any_condjump_p (p) && onlyjump_p (p)
2911 && next_real_insn (JUMP_LABEL (p)) == our_next
2912 /* If it's not safe to move the sequence, then we
2913 mustn't try. */
2914 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2915 &last_insn_to_move))
2916 {
2917 rtx target
2918 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2919 struct loop *target_loop = uid_loop[INSN_UID (target)];
2920 rtx loc, loc2;
2921 rtx tmp;
2922
2923 /* Search for possible garbage past the conditional jumps
2924 and look for the last barrier. */
2925 for (tmp = last_insn_to_move;
2926 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2927 if (GET_CODE (tmp) == BARRIER)
2928 last_insn_to_move = tmp;
2929
2930 for (loc = target; loc; loc = PREV_INSN (loc))
2931 if (GET_CODE (loc) == BARRIER
2932 /* Don't move things inside a tablejump. */
2933 && ((loc2 = next_nonnote_insn (loc)) == 0
2934 || GET_CODE (loc2) != CODE_LABEL
2935 || (loc2 = next_nonnote_insn (loc2)) == 0
2936 || GET_CODE (loc2) != JUMP_INSN
2937 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2938 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2939 && uid_loop[INSN_UID (loc)] == target_loop)
2940 break;
2941
2942 if (loc == 0)
2943 for (loc = target; loc; loc = NEXT_INSN (loc))
2944 if (GET_CODE (loc) == BARRIER
2945 /* Don't move things inside a tablejump. */
2946 && ((loc2 = next_nonnote_insn (loc)) == 0
2947 || GET_CODE (loc2) != CODE_LABEL
2948 || (loc2 = next_nonnote_insn (loc2)) == 0
2949 || GET_CODE (loc2) != JUMP_INSN
2950 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2951 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2952 && uid_loop[INSN_UID (loc)] == target_loop)
2953 break;
2954
2955 if (loc)
2956 {
2957 rtx cond_label = JUMP_LABEL (p);
2958 rtx new_label = get_label_after (p);
2959
2960 /* Ensure our label doesn't go away. */
2961 LABEL_NUSES (cond_label)++;
2962
2963 /* Verify that uid_loop is large enough and that
2964 we can invert P. */
2965 if (invert_jump (p, new_label, 1))
2966 {
2967 rtx q, r;
2968
2969 /* If no suitable BARRIER was found, create a suitable
2970 one before TARGET. Since TARGET is a fall through
2971 path, we'll need to insert a jump around our block
2972 and add a BARRIER before TARGET.
2973
2974 This creates an extra unconditional jump outside
2975 the loop. However, the benefits of removing rarely
2976 executed instructions from inside the loop usually
2977 			     outweigh the cost of the extra unconditional jump
2978 outside the loop. */
2979 if (loc == 0)
2980 {
2981 rtx temp;
2982
2983 temp = gen_jump (JUMP_LABEL (insn));
2984 temp = emit_jump_insn_before (temp, target);
2985 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2986 LABEL_NUSES (JUMP_LABEL (insn))++;
2987 loc = emit_barrier_before (target);
2988 }
2989
2990 /* Include the BARRIER after INSN and copy the
2991 block after LOC. */
2992 if (squeeze_notes (&new_label, &last_insn_to_move))
2993 abort ();
2994 reorder_insns (new_label, last_insn_to_move, loc);
2995
2996 /* All those insns are now in TARGET_LOOP. */
2997 for (q = new_label;
2998 q != NEXT_INSN (last_insn_to_move);
2999 q = NEXT_INSN (q))
3000 uid_loop[INSN_UID (q)] = target_loop;
3001
3002 /* The label jumped to by INSN is no longer a loop
3003 exit. Unless INSN does not have a label (e.g.,
3004 it is a RETURN insn), search loop->exit_labels
3005 to find its label_ref, and remove it. Also turn
3006 off LABEL_OUTSIDE_LOOP_P bit. */
3007 if (JUMP_LABEL (insn))
3008 {
3009 for (q = 0, r = this_loop->exit_labels;
3010 r;
3011 q = r, r = LABEL_NEXTREF (r))
3012 if (XEXP (r, 0) == JUMP_LABEL (insn))
3013 {
3014 LABEL_OUTSIDE_LOOP_P (r) = 0;
3015 if (q)
3016 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3017 else
3018 this_loop->exit_labels = LABEL_NEXTREF (r);
3019 break;
3020 }
3021
3022 for (loop = this_loop; loop && loop != target_loop;
3023 loop = loop->outer)
3024 loop->exit_count--;
3025
3026 /* If we didn't find it, then something is
3027 wrong. */
3028 if (! r)
3029 abort ();
3030 }
3031
3032 /* P is now a jump outside the loop, so it must be put
3033 in loop->exit_labels, and marked as such.
3034 The easiest way to do this is to just call
3035 mark_loop_jump again for P. */
3036 mark_loop_jump (PATTERN (p), this_loop);
3037
3038 /* If INSN now jumps to the insn after it,
3039 delete INSN. */
3040 if (JUMP_LABEL (insn) != 0
3041 && (next_real_insn (JUMP_LABEL (insn))
3042 == next_real_insn (insn)))
3043 delete_related_insns (insn);
3044 }
3045
3046 /* Continue the loop after where the conditional
3047 branch used to jump, since the only branch insn
3048 in the block (if it still remains) is an inter-loop
3049 branch and hence needs no processing. */
3050 insn = NEXT_INSN (cond_label);
3051
3052 if (--LABEL_NUSES (cond_label) == 0)
3053 delete_related_insns (cond_label);
3054
3055 /* This loop will be continued with NEXT_INSN (insn). */
3056 insn = PREV_INSN (insn);
3057 }
3058 }
3059 }
3060 }
3061 }
3062
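/* The containment walk written out inline several times above (and
   again in mark_loop_jump below) follows the `outer' links.  A
   hypothetical helper, shown only as a sketch:  */
#if 0
/* Return nonzero if INNER is LOOP itself or is nested inside LOOP.  */
static int
toy_loop_contains_p (const struct loop *loop, const struct loop *inner)
{
  for (; inner; inner = inner->outer)
    if (inner == loop)
      return 1;
  return 0;
}
#endif
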
3063 /* If any label in X jumps to a loop different from LOOP and from any of the
3064    loops LOOP is contained in, mark the target loop invalid.
3065
3066 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3067
3068 static void
3069 mark_loop_jump (rtx x, struct loop *loop)
3070 {
3071 struct loop *dest_loop;
3072 struct loop *outer_loop;
3073 int i;
3074
3075 switch (GET_CODE (x))
3076 {
3077 case PC:
3078 case USE:
3079 case CLOBBER:
3080 case REG:
3081 case MEM:
3082 case CONST_INT:
3083 case CONST_DOUBLE:
3084 case RETURN:
3085 return;
3086
3087 case CONST:
3088 /* There could be a label reference in here. */
3089 mark_loop_jump (XEXP (x, 0), loop);
3090 return;
3091
3092 case PLUS:
3093 case MINUS:
3094 case MULT:
3095 mark_loop_jump (XEXP (x, 0), loop);
3096 mark_loop_jump (XEXP (x, 1), loop);
3097 return;
3098
3099 case LO_SUM:
3100 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3101 mark_loop_jump (XEXP (x, 1), loop);
3102 return;
3103
3104 case SIGN_EXTEND:
3105 case ZERO_EXTEND:
3106 mark_loop_jump (XEXP (x, 0), loop);
3107 return;
3108
3109 case LABEL_REF:
3110 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3111
3112 /* Link together all labels that branch outside the loop. This
3113 is used by final_[bg]iv_value and the loop unrolling code. Also
3114 mark this LABEL_REF so we know that this branch should predict
3115 false. */
3116
3117 /* A check to make sure the label is not in an inner nested loop,
3118 since this does not count as a loop exit. */
3119 if (dest_loop)
3120 {
3121 for (outer_loop = dest_loop; outer_loop;
3122 outer_loop = outer_loop->outer)
3123 if (outer_loop == loop)
3124 break;
3125 }
3126 else
3127 outer_loop = NULL;
3128
3129 if (loop && ! outer_loop)
3130 {
3131 LABEL_OUTSIDE_LOOP_P (x) = 1;
3132 LABEL_NEXTREF (x) = loop->exit_labels;
3133 loop->exit_labels = x;
3134
3135 for (outer_loop = loop;
3136 outer_loop && outer_loop != dest_loop;
3137 outer_loop = outer_loop->outer)
3138 outer_loop->exit_count++;
3139 }
3140
3141 /* If this is inside a loop, but not in the current loop or one enclosed
3142 by it, it invalidates at least one loop. */
3143
3144 if (! dest_loop)
3145 return;
3146
3147 /* We must invalidate every nested loop containing the target of this
3148 label, except those that also contain the jump insn. */
3149
3150 for (; dest_loop; dest_loop = dest_loop->outer)
3151 {
3152 /* Stop when we reach a loop that also contains the jump insn. */
3153 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3154 if (dest_loop == outer_loop)
3155 return;
3156
3157 /* If we get here, we know we need to invalidate a loop. */
3158 if (loop_dump_stream && ! dest_loop->invalid)
3159 fprintf (loop_dump_stream,
3160 "\nLoop at %d ignored due to multiple entry points.\n",
3161 INSN_UID (dest_loop->start));
3162
3163 dest_loop->invalid = 1;
3164 }
3165 return;
3166
3167 case SET:
3168 /* If this is not setting pc, ignore. */
3169 if (SET_DEST (x) == pc_rtx)
3170 mark_loop_jump (SET_SRC (x), loop);
3171 return;
3172
3173 case IF_THEN_ELSE:
3174 mark_loop_jump (XEXP (x, 1), loop);
3175 mark_loop_jump (XEXP (x, 2), loop);
3176 return;
3177
3178 case PARALLEL:
3179 case ADDR_VEC:
3180 for (i = 0; i < XVECLEN (x, 0); i++)
3181 mark_loop_jump (XVECEXP (x, 0, i), loop);
3182 return;
3183
3184 case ADDR_DIFF_VEC:
3185 for (i = 0; i < XVECLEN (x, 1); i++)
3186 mark_loop_jump (XVECEXP (x, 1, i), loop);
3187 return;
3188
3189 default:
3190 /* Strictly speaking this is not a jump into the loop, only a possible
3191 jump out of the loop. However, we have no way to link the destination
3192 of this jump onto the list of exit labels. To be safe we mark this
3193 loop and any containing loops as invalid. */
3194 if (loop)
3195 {
3196 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3197 {
3198 if (loop_dump_stream && ! outer_loop->invalid)
3199 fprintf (loop_dump_stream,
3200 "\nLoop at %d ignored due to unknown exit jump.\n",
3201 INSN_UID (outer_loop->start));
3202 outer_loop->invalid = 1;
3203 }
3204 }
3205 return;
3206 }
3207 }
3208 \f
3209 /* Return nonzero if there is a label in the range from
3210    insn INSN to and including the insn whose luid is END.
3211 INSN must have an assigned luid (i.e., it must not have
3212 been previously created by loop.c). */
3213
3214 static int
3215 labels_in_range_p (rtx insn, int end)
3216 {
3217 while (insn && INSN_LUID (insn) <= end)
3218 {
3219 if (GET_CODE (insn) == CODE_LABEL)
3220 return 1;
3221 insn = NEXT_INSN (insn);
3222 }
3223
3224 return 0;
3225 }
3226
3227 /* Record that a memory reference X is being set. */
3228
3229 static void
3230 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3231 void *data ATTRIBUTE_UNUSED)
3232 {
3233 struct loop_info *loop_info = data;
3234
3235 if (x == 0 || GET_CODE (x) != MEM)
3236 return;
3237
3238 /* Count number of memory writes.
3239 This affects heuristics in strength_reduce. */
3240 loop_info->num_mem_sets++;
3241
3242 /* BLKmode MEM means all memory is clobbered. */
3243 if (GET_MODE (x) == BLKmode)
3244 {
3245 if (RTX_UNCHANGING_P (x))
3246 loop_info->unknown_constant_address_altered = 1;
3247 else
3248 loop_info->unknown_address_altered = 1;
3249
3250 return;
3251 }
3252
3253 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3254 loop_info->store_mems);
3255 }
3256
3257 /* X is a value modified by an INSN that references a biv inside a loop
3258    exit test (i.e., X is somehow related to the value of the biv).  If X
3259 is a pseudo that is used more than once, then the biv is (effectively)
3260 used more than once. DATA is a pointer to a loop_regs structure. */
3261
3262 static void
3263 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3264 {
3265 struct loop_regs *regs = (struct loop_regs *) data;
3266
3267 if (x == 0)
3268 return;
3269
3270 while (GET_CODE (x) == STRICT_LOW_PART
3271 || GET_CODE (x) == SIGN_EXTRACT
3272 || GET_CODE (x) == ZERO_EXTRACT
3273 || GET_CODE (x) == SUBREG)
3274 x = XEXP (x, 0);
3275
3276 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3277 return;
3278
3279 /* If we do not have usage information, or if we know the register
3280 is used more than once, note that fact for check_dbra_loop. */
3281 if (REGNO (x) >= max_reg_before_loop
3282 || ! regs->array[REGNO (x)].single_usage
3283 || regs->array[REGNO (x)].single_usage == const0_rtx)
3284 regs->multiple_uses = 1;
3285 }
3286 \f
3287 /* Return nonzero if the rtx X is invariant over the current loop.
3288
3289 The value is 2 if we refer to something only conditionally invariant.
3290
3291 A memory ref is invariant if it is not volatile and does not conflict
3292 with anything stored in `loop_info->store_mems'. */
3293
3294 int
3295 loop_invariant_p (const struct loop *loop, rtx x)
3296 {
3297 struct loop_info *loop_info = LOOP_INFO (loop);
3298 struct loop_regs *regs = LOOP_REGS (loop);
3299 int i;
3300 enum rtx_code code;
3301 const char *fmt;
3302 int conditional = 0;
3303 rtx mem_list_entry;
3304
3305 if (x == 0)
3306 return 1;
3307 code = GET_CODE (x);
3308 switch (code)
3309 {
3310 case CONST_INT:
3311 case CONST_DOUBLE:
3312 case SYMBOL_REF:
3313 case CONST:
3314 return 1;
3315
3316 case LABEL_REF:
3317 /* A LABEL_REF is normally invariant, however, if we are unrolling
3318 loops, and this label is inside the loop, then it isn't invariant.
3319 This is because each unrolled copy of the loop body will have
3320 a copy of this label. If this was invariant, then an insn loading
3321 the address of this label into a register might get moved outside
3322 the loop, and then each loop body would end up using the same label.
3323
3324 We don't know the loop bounds here though, so just fail for all
3325 labels. */
3326 if (flag_old_unroll_loops)
3327 return 0;
3328 else
3329 return 1;
3330
3331 case PC:
3332 case CC0:
3333 case UNSPEC_VOLATILE:
3334 return 0;
3335
3336 case REG:
3337 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3338 since the reg might be set by initialization within the loop. */
3339
3340 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3341 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3342 && ! current_function_has_nonlocal_goto)
3343 return 1;
3344
3345 if (LOOP_INFO (loop)->has_call
3346 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3347 return 0;
3348
3349 /* Out-of-range regs can occur when we are called from unrolling.
3350 These registers created by the unroller are set in the loop,
3351 hence are never invariant.
3352 Other out-of-range regs can be generated by load_mems; those that
3353 are written to in the loop are not invariant, while those that are
3354 not written to are invariant. It would be easy for load_mems
3355 to set n_times_set correctly for these registers, however, there
3356 is no easy way to distinguish them from registers created by the
3357 unroller. */
3358
3359 if (REGNO (x) >= (unsigned) regs->num)
3360 return 0;
3361
3362 if (regs->array[REGNO (x)].set_in_loop < 0)
3363 return 2;
3364
3365 return regs->array[REGNO (x)].set_in_loop == 0;
3366
3367 case MEM:
3368 /* Volatile memory references must be rejected. Do this before
3369 checking for read-only items, so that volatile read-only items
3370 will be rejected also. */
3371 if (MEM_VOLATILE_P (x))
3372 return 0;
3373
3374 /* See if there is any dependence between a store and this load. */
3375 mem_list_entry = loop_info->store_mems;
3376 while (mem_list_entry)
3377 {
3378 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3379 x, rtx_varies_p))
3380 return 0;
3381
3382 mem_list_entry = XEXP (mem_list_entry, 1);
3383 }
3384
3385 /* It's not invalidated by a store in memory
3386 but we must still verify the address is invariant. */
3387 break;
3388
3389 case ASM_OPERANDS:
3390 /* Don't mess with insns declared volatile. */
3391 if (MEM_VOLATILE_P (x))
3392 return 0;
3393 break;
3394
3395 default:
3396 break;
3397 }
3398
3399 fmt = GET_RTX_FORMAT (code);
3400 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3401 {
3402 if (fmt[i] == 'e')
3403 {
3404 int tem = loop_invariant_p (loop, XEXP (x, i));
3405 if (tem == 0)
3406 return 0;
3407 if (tem == 2)
3408 conditional = 1;
3409 }
3410 else if (fmt[i] == 'E')
3411 {
3412 int j;
3413 for (j = 0; j < XVECLEN (x, i); j++)
3414 {
3415 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3416 if (tem == 0)
3417 return 0;
3418 if (tem == 2)
3419 conditional = 1;
3420 }
3421
3422 }
3423 }
3424
3425 return 1 + conditional;
3426 }
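
/* How callers are expected to consume the tri-state result of
   loop_invariant_p: 0 means variant, 1 invariant, 2 only conditionally
   invariant.  A hedged usage sketch (`toy_safe_to_hoist_p' is
   hypothetical); move_movables applies the same "== 1" test.  */
#if 0
static int
toy_safe_to_hoist_p (const struct loop *loop, rtx x)
{
  /* Insist on unconditional invariance; a result of 2 holds only on
     paths that cannot wrap around the loop.  */
  return loop_invariant_p (loop, x) == 1;
}
#endif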
3427 \f
3428 /* Return nonzero if all the insns in the loop that set REG
3429 are INSN and the immediately following insns,
3430 and if each of those insns sets REG in an invariant way
3431 (not counting uses of REG in them).
3432
3433 The value is 2 if some of these insns are only conditionally invariant.
3434
3435 We assume that INSN itself is the first set of REG
3436 and that its source is invariant. */
3437
3438 static int
3439 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3440 rtx insn)
3441 {
3442 struct loop_regs *regs = LOOP_REGS (loop);
3443 rtx p = insn;
3444 unsigned int regno = REGNO (reg);
3445 rtx temp;
3446 /* Number of sets we have to insist on finding after INSN. */
3447 int count = n_sets - 1;
3448 int old = regs->array[regno].set_in_loop;
3449 int value = 0;
3450 int this;
3451
3452 /* If N_SETS hit the limit, we can't rely on its value. */
3453 if (n_sets == 127)
3454 return 0;
3455
3456 regs->array[regno].set_in_loop = 0;
3457
3458 while (count > 0)
3459 {
3460 enum rtx_code code;
3461 rtx set;
3462
3463 p = NEXT_INSN (p);
3464 code = GET_CODE (p);
3465
3466 /* If library call, skip to end of it. */
3467 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3468 p = XEXP (temp, 0);
3469
3470 this = 0;
3471 if (code == INSN
3472 && (set = single_set (p))
3473 && REG_P (SET_DEST (set))
3474 && REGNO (SET_DEST (set)) == regno)
3475 {
3476 this = loop_invariant_p (loop, SET_SRC (set));
3477 if (this != 0)
3478 value |= this;
3479 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3480 {
3481 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3482 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3483 notes are OK. */
3484 this = (CONSTANT_P (XEXP (temp, 0))
3485 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3486 && loop_invariant_p (loop, XEXP (temp, 0))));
3487 if (this != 0)
3488 value |= this;
3489 }
3490 }
3491 if (this != 0)
3492 count--;
3493 else if (code != NOTE)
3494 {
3495 regs->array[regno].set_in_loop = old;
3496 return 0;
3497 }
3498 }
3499
3500 regs->array[regno].set_in_loop = old;
3501 /* If loop_invariant_p ever returned 2, we return 2. */
3502 return 1 + (value & 2);
3503 }
3504 \f
3505 /* Look at all uses (not sets) of registers in X. For each, if it is
3506 the single use, set REGS->array[REGNO].single_usage to INSN; if there
3507 was a previous use in a different insn, set it to const0_rtx. */
3508
3509 static void
3510 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3511 {
3512 enum rtx_code code = GET_CODE (x);
3513 const char *fmt = GET_RTX_FORMAT (code);
3514 int i, j;
3515
3516 if (code == REG)
3517 regs->array[REGNO (x)].single_usage
3518 = (regs->array[REGNO (x)].single_usage != 0
3519 && regs->array[REGNO (x)].single_usage != insn)
3520 ? const0_rtx : insn;
3521
3522 else if (code == SET)
3523 {
3524 /* Don't count SET_DEST if it is a REG; otherwise count things
3525 in SET_DEST, because if a register is partially modified, it won't
3526 show up as a potential movable, so we don't care how single_usage
3527 is set for it. */
3528 if (!REG_P (SET_DEST (x)))
3529 find_single_use_in_loop (regs, insn, SET_DEST (x));
3530 find_single_use_in_loop (regs, insn, SET_SRC (x));
3531 }
3532 else
3533 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3534 {
3535 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3536 find_single_use_in_loop (regs, insn, XEXP (x, i));
3537 else if (fmt[i] == 'E')
3538 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3539 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3540 }
3541 }
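/* For example, if (reg 60) is used by exactly one insn I in the loop,
   regs->array[60].single_usage ends up as I after the scan; if it is
   used by two different insns, it ends up as const0_rtx.  */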
3542 \f
3543 /* Count and record any set in X which is contained in INSN. Update
3544 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3545 in X. */
3546
3547 static void
3548 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3549 {
3550 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3551 /* Don't move a reg that has an explicit clobber.
3552 It's not worth the pain to try to do it correctly. */
3553 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3554
3555 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3556 {
3557 rtx dest = SET_DEST (x);
3558 while (GET_CODE (dest) == SUBREG
3559 || GET_CODE (dest) == ZERO_EXTRACT
3560 || GET_CODE (dest) == SIGN_EXTRACT
3561 || GET_CODE (dest) == STRICT_LOW_PART)
3562 dest = XEXP (dest, 0);
3563 if (REG_P (dest))
3564 {
3565 int i;
3566 int regno = REGNO (dest);
3567 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3568 {
3569 /* If this is the first setting of this reg
3570 in the current basic block, and it was set before,
3571 it must be set in two basic blocks, so it cannot
3572 be moved out of the loop. */
3573 if (regs->array[regno+i].set_in_loop > 0
3574 && last_set[regno+i] == 0)
3575 regs->array[regno+i].may_not_optimize = 1;
3576 /* If this is not the first setting in the current basic block,
3577 see if the reg was used between the previous setting and this
3578 one. If so, neither one can be moved. */
3579 if (last_set[regno+i] != 0
3580 && reg_used_between_p (dest, last_set[regno+i], insn))
3581 regs->array[regno+i].may_not_optimize = 1;
3582 if (regs->array[regno+i].set_in_loop < 127)
3583 ++regs->array[regno+i].set_in_loop;
3584 last_set[regno+i] = insn;
3585 }
3586 }
3587 }
3588 }
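/* As an illustration of the SET_DEST stripping above: for an insn whose
   pattern is (set (subreg:SI (reg:DI 60) 4) ...), the SUBREG is peeled
   away, so it is (reg:DI 60) whose set_in_loop count is incremented and
   whose last_set entry is updated.  */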
3589 \f
3590 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3591 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3592 contained in insn INSN is used by any insn that precedes INSN in
3593 cyclic order starting from the loop entry point.
3594
3595 We don't want to use INSN_LUID here because if we restrict INSN to those
3596 that have a valid INSN_LUID, it means we cannot move an invariant out
3597 from an inner loop past two loops. */
3598
3599 static int
3600 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3601 {
3602 rtx reg = SET_DEST (set);
3603 rtx p;
3604
3605 /* Scan forward checking for register usage. If we hit INSN, we
3606 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3607 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3608 {
3609 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3610 return 1;
3611
3612 if (p == loop->end)
3613 p = loop->start;
3614 }
3615
3616 return 0;
3617 }
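/* For example, in a rotated loop entered at the exit test near its end,
   the cyclic scan order is:

	loop->scan_start (the exit test)
	... insns up to loop->end, then wrap to loop->start ...
	... insns up to INSN ...

   so an insn that is physically earlier than INSN can still follow it
   in this order and cause us to return 1.  */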
3618 \f
3619
3620 /* Information we collect about arrays that we might want to prefetch. */
3621 struct prefetch_info
3622 {
3623 struct iv_class *class; /* Class this prefetch is based on. */
3624 struct induction *giv; /* GIV this prefetch is based on. */
3625 rtx base_address; /* Start prefetching from this address plus
3626 index. */
3627 HOST_WIDE_INT index;
3628 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3629 iteration. */
3630 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3631 prefetch area in one iteration. */
3632 unsigned int total_bytes; /* Total bytes loop will access in this block.
3633 This is set only for loops with known
3634 iteration counts and is 0xffffffff
3635 otherwise. */
3636 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3637 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3638 unsigned int write : 1; /* 1 for read/write prefetches. */
3639 };
3640
3641 /* Data used by check_store function. */
3642 struct check_store_data
3643 {
3644 rtx mem_address;
3645 int mem_write;
3646 };
3647
3648 static void check_store (rtx, rtx, void *);
3649 static void emit_prefetch_instructions (struct loop *);
3650 static int rtx_equal_for_prefetch_p (rtx, rtx);
3651
3652 /* Set mem_write when a store to mem_address is found. Used as a
3653 callback to note_stores. */
3654 static void
3655 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3656 {
3657 struct check_store_data *d = (struct check_store_data *) data;
3658
3659 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3660 d->mem_write = 1;
3661 }
3662 \f
3663 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3664 important for getting some addresses combined. More sophisticated
3665 transformations can be added later when necessary.
3666
3667 ??? The same operand-swapping trick is used in several other places;
3668 it would be nice to develop a common way to handle this. */
3669
3670 static int
3671 rtx_equal_for_prefetch_p (rtx x, rtx y)
3672 {
3673 int i;
3674 int j;
3675 enum rtx_code code = GET_CODE (x);
3676 const char *fmt;
3677
3678 if (x == y)
3679 return 1;
3680 if (code != GET_CODE (y))
3681 return 0;
3682
3683 if (COMMUTATIVE_ARITH_P (x))
3684 {
3685 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3686 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3687 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3688 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3689 }
3690
3691 /* Compare the elements. If any pair of corresponding elements fails to
3692 match, return 0 for the whole thing. */
3693
3694 fmt = GET_RTX_FORMAT (code);
3695 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3696 {
3697 switch (fmt[i])
3698 {
3699 case 'w':
3700 if (XWINT (x, i) != XWINT (y, i))
3701 return 0;
3702 break;
3703
3704 case 'i':
3705 if (XINT (x, i) != XINT (y, i))
3706 return 0;
3707 break;
3708
3709 case 'E':
3710 /* Two vectors must have the same length. */
3711 if (XVECLEN (x, i) != XVECLEN (y, i))
3712 return 0;
3713
3714 /* And the corresponding elements must match. */
3715 for (j = 0; j < XVECLEN (x, i); j++)
3716 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3717 XVECEXP (y, i, j)) == 0)
3718 return 0;
3719 break;
3720
3721 case 'e':
3722 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3723 return 0;
3724 break;
3725
3726 case 's':
3727 if (strcmp (XSTR (x, i), XSTR (y, i)))
3728 return 0;
3729 break;
3730
3731 case 'u':
3732 /* These are just backpointers, so they don't matter. */
3733 break;
3734
3735 case '0':
3736 break;
3737
3738 /* It is believed that rtx's at this level will never
3739 contain anything but integers and other rtx's,
3740 except for within LABEL_REFs and SYMBOL_REFs. */
3741 default:
3742 abort ();
3743 }
3744 }
3745 return 1;
3746 }
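/* For example, thanks to the commutative swap above,
   (plus:SI (reg 60) (reg 61)) matches (plus:SI (reg 61) (reg 60)), so
   two GIVs whose base addresses differ only in operand order can still
   be merged into a single prefetch by emit_prefetch_instructions.  */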
3747 \f
3748 /* Remove any constant addition from the expression X (when present),
3749 store the remainder back through X, and return the constant's value. */
3750
3751 static HOST_WIDE_INT
3752 remove_constant_addition (rtx *x)
3753 {
3754 HOST_WIDE_INT addval = 0;
3755 rtx exp = *x;
3756
3757 /* Avoid clobbering a shared CONST expression. */
3758 if (GET_CODE (exp) == CONST)
3759 {
3760 if (GET_CODE (XEXP (exp, 0)) == PLUS
3761 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3762 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3763 {
3764 *x = XEXP (XEXP (exp, 0), 0);
3765 return INTVAL (XEXP (XEXP (exp, 0), 1));
3766 }
3767 return 0;
3768 }
3769
3770 if (GET_CODE (exp) == CONST_INT)
3771 {
3772 addval = INTVAL (exp);
3773 *x = const0_rtx;
3774 }
3775
3776 /* For a PLUS expression, recurse on both operands. */
3777 else if (GET_CODE (exp) == PLUS)
3778 {
3779 addval += remove_constant_addition (&XEXP (exp, 0));
3780 addval += remove_constant_addition (&XEXP (exp, 1));
3781
3782 /* If either operand was constant, remove the resulting extra
3783 zero from the expression. */
3784 if (XEXP (exp, 0) == const0_rtx)
3785 *x = XEXP (exp, 1);
3786 else if (XEXP (exp, 1) == const0_rtx)
3787 *x = XEXP (exp, 0);
3788 }
3789
3790 return addval;
3791 }
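/* For example, given *X == (plus (reg 60) (const_int 16)), this returns
   16 and rewrites *X to (reg 60).  Given a shared
   (const (plus (symbol_ref "a") (const_int 8))), it returns 8 and makes
   *X point at the SYMBOL_REF without modifying the CONST in place.  */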
3792
3793 /* Attempt to identify accesses to arrays that are most likely to cause cache
3794 misses, and emit prefetch instructions a few prefetch blocks forward.
3795
3796 To detect the arrays we use the GIV information that was collected by the
3797 strength reduction pass.
3798
3799 The prefetch instructions are generated after the GIV analysis is done
3800 and before the strength reduction process. The new GIVs are injected into
3801 the strength reduction tables, so the prefetch addresses are optimized as
3802 well.
3803
3804 GIVs are split into base address, stride, and constant addition values.
3805 GIVs with the same base address and stride, and with close addition
3806 values, are combined into a single prefetch. Writes through GIVs are
3807 also detected, so that prefetch-for-write instructions can be used for
3808 the blocks we write to, on machines that support write prefetches.
3809
3810 Several heuristics are used to determine when to prefetch. They are
3811 controlled by defined symbols that can be overridden for each target. */
3812
3813 static void
3814 emit_prefetch_instructions (struct loop *loop)
3815 {
3816 int num_prefetches = 0;
3817 int num_real_prefetches = 0;
3818 int num_real_write_prefetches = 0;
3819 int num_prefetches_before = 0;
3820 int num_write_prefetches_before = 0;
3821 int ahead = 0;
3822 int i;
3823 struct iv_class *bl;
3824 struct induction *iv;
3825 struct prefetch_info info[MAX_PREFETCHES];
3826 struct loop_ivs *ivs = LOOP_IVS (loop);
3827
3828 if (!HAVE_prefetch)
3829 return;
3830
3831 /* Consider only loops without calls. When a call is made, the loop is
3832 probably slow enough to hide the memory latency anyway. */
3833 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3834 {
3835 if (loop_dump_stream)
3836 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3837
3838 return;
3839 }
3840
3841 /* Don't prefetch in loops known to have few iterations. */
3842 if (PREFETCH_NO_LOW_LOOPCNT
3843 && LOOP_INFO (loop)->n_iterations
3844 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3845 {
3846 if (loop_dump_stream)
3847 fprintf (loop_dump_stream,
3848 "Prefetch: ignoring loop: not enough iterations.\n");
3849 return;
3850 }
3851
3852 /* Search all induction variables and pick those interesting for the prefetch
3853 machinery. */
3854 for (bl = ivs->list; bl; bl = bl->next)
3855 {
3856 struct induction *biv = bl->biv, *biv1;
3857 int basestride = 0;
3858
3859 biv1 = biv;
3860
3861 /* Expect all BIVs to be executed in each iteration. This makes our
3862 analysis more conservative. */
3863 while (biv1)
3864 {
3865 /* Discard non-constant additions that we can't handle well yet, and
3866 BIVs that are executed multiple times; such BIVs ought to be
3867 handled in the nested loop. We accept not_every_iteration BIVs,
3868 since these only result in larger strides and make our
3869 heuristics more conservative. */
3870 if (GET_CODE (biv1->add_val) != CONST_INT)
3871 {
3872 if (loop_dump_stream)
3873 {
3874 fprintf (loop_dump_stream,
3875 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3876 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
3877 print_rtl (loop_dump_stream, biv1->add_val);
3878 fprintf (loop_dump_stream, "\n");
3879 }
3880 break;
3881 }
3882
3883 if (biv1->maybe_multiple)
3884 {
3885 if (loop_dump_stream)
3886 {
3887 fprintf (loop_dump_stream,
3888 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3889 REGNO (biv->src_reg), INSN_UID (biv->insn));
3890 print_rtl (loop_dump_stream, biv->add_val);
3891 fprintf (loop_dump_stream, "\n");
3892 }
3893 break;
3894 }
3895
3896 basestride += INTVAL (biv1->add_val);
3897 biv1 = biv1->next_iv;
3898 }
3899
3900 if (biv1 || !basestride)
3901 continue;
3902
3903 for (iv = bl->giv; iv; iv = iv->next_iv)
3904 {
3905 rtx address;
3906 rtx temp;
3907 HOST_WIDE_INT index = 0;
3908 int add = 1;
3909 HOST_WIDE_INT stride = 0;
3910 int stride_sign = 1;
3911 struct check_store_data d;
3912 const char *ignore_reason = NULL;
3913 int size = GET_MODE_SIZE (GET_MODE (iv));
3914
3915 /* See whether an induction variable is interesting to us and if
3916 not, report the reason. */
3917 if (iv->giv_type != DEST_ADDR)
3918 ignore_reason = "giv is not a destination address";
3919
3920 /* We are interested only in constant stride memory references
3921 in order to be able to compute density easily. */
3922 else if (GET_CODE (iv->mult_val) != CONST_INT)
3923 ignore_reason = "stride is not constant";
3924
3925 else
3926 {
3927 stride = INTVAL (iv->mult_val) * basestride;
3928 if (stride < 0)
3929 {
3930 stride = -stride;
3931 stride_sign = -1;
3932 }
3933
3934 /* On some targets, reversed order prefetches are not
3935 worthwhile. */
3936 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3937 ignore_reason = "reversed order stride";
3938
3939 /* Prefetch of accesses with an extreme stride might not be
3940 worthwhile, either. */
3941 else if (PREFETCH_NO_EXTREME_STRIDE
3942 && stride > PREFETCH_EXTREME_STRIDE)
3943 ignore_reason = "extreme stride";
3944
3945 /* Ignore GIVs with varying add values; we can't predict the
3946 value for the next iteration. */
3947 else if (!loop_invariant_p (loop, iv->add_val))
3948 ignore_reason = "giv has varying add value";
3949
3950 /* Ignore GIVs in the nested loops; they ought to have been
3951 handled already. */
3952 else if (iv->maybe_multiple)
3953 ignore_reason = "giv is in nested loop";
3954 }
3955
3956 if (ignore_reason != NULL)
3957 {
3958 if (loop_dump_stream)
3959 fprintf (loop_dump_stream,
3960 "Prefetch: ignoring giv at %d: %s.\n",
3961 INSN_UID (iv->insn), ignore_reason);
3962 continue;
3963 }
3964
3965 /* Determine the pointer to the base of the array we are examining. It is
3966 the sum of the BIV's initial value and the GIV's add_val. */
3967 address = copy_rtx (iv->add_val);
3968 temp = copy_rtx (bl->initial_value);
3969
3970 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3971 index = remove_constant_addition (&address);
3972
3973 d.mem_write = 0;
3974 d.mem_address = *iv->location;
3975
3976 /* When the GIV is not always executed, we might be better off by
3977 not dirtying the cache pages. */
3978 if (PREFETCH_CONDITIONAL || iv->always_executed)
3979 note_stores (PATTERN (iv->insn), check_store, &d);
3980 else
3981 {
3982 if (loop_dump_stream)
3983 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3984 INSN_UID (iv->insn), "in conditional code.");
3985 continue;
3986 }
3987
3988 /* Attempt to find another prefetch to the same array and see if we
3989 can merge this one. */
3990 for (i = 0; i < num_prefetches; i++)
3991 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3992 && stride == info[i].stride)
3993 {
3994 /* If both access the same array (the same location, just with a
3995 small difference in constant indexes), merge the prefetches.
3996 Just issue the later one; the earlier will be covered by the
3997 prefetch issued on the previous iteration.
3998 The artificial threshold should not be too small,
3999 but also not bigger than the small portion of memory usually
4000 traversed by a single loop. */
4001 if (index >= info[i].index
4002 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4003 {
4004 info[i].write |= d.mem_write;
4005 info[i].bytes_accessed += size;
4006 info[i].index = index;
4007 info[i].giv = iv;
4008 info[i].class = bl;
4009 info[i].base_address = address;
4010 add = 0;
4011 break;
4012 }
4013
4014 if (index < info[i].index
4015 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4016 {
4017 info[i].write |= d.mem_write;
4018 info[i].bytes_accessed += size;
4019 add = 0;
4020 break;
4021 }
4022 }
4023
4024 /* Merging failed. */
4025 if (add)
4026 {
4027 info[num_prefetches].giv = iv;
4028 info[num_prefetches].class = bl;
4029 info[num_prefetches].index = index;
4030 info[num_prefetches].stride = stride;
4031 info[num_prefetches].base_address = address;
4032 info[num_prefetches].write = d.mem_write;
4033 info[num_prefetches].bytes_accessed = size;
4034 num_prefetches++;
4035 if (num_prefetches >= MAX_PREFETCHES)
4036 {
4037 if (loop_dump_stream)
4038 fprintf (loop_dump_stream,
4039 "Maximal number of prefetches exceeded.\n");
4040 return;
4041 }
4042 }
4043 }
4044 }
4045
4046 for (i = 0; i < num_prefetches; i++)
4047 {
4048 int density;
4049
4050 /* Attempt to calculate the total number of bytes fetched by all
4051 iterations of the loop. Avoid overflow. */
4052 if (LOOP_INFO (loop)->n_iterations
4053 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4054 >= LOOP_INFO (loop)->n_iterations))
4055 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4056 else
4057 info[i].total_bytes = 0xffffffff;
4058
4059 density = info[i].bytes_accessed * 100 / info[i].stride;
4060
4061 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4062 if (PREFETCH_ONLY_DENSE_MEM)
4063 if (density * 256 > PREFETCH_DENSE_MEM * 100
4064 && (info[i].total_bytes / PREFETCH_BLOCK
4065 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4066 {
4067 info[i].prefetch_before_loop = 1;
4068 info[i].prefetch_in_loop
4069 = (info[i].total_bytes / PREFETCH_BLOCK
4070 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4071 }
4072 else
4073 {
4074 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4075 if (loop_dump_stream)
4076 fprintf (loop_dump_stream,
4077 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4078 INSN_UID (info[i].giv->insn), density);
4079 }
4080 else
4081 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4082
4083 /* Find how many prefetch instructions we'll use within the loop. */
4084 if (info[i].prefetch_in_loop != 0)
4085 {
4086 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4087 / PREFETCH_BLOCK);
4088 num_real_prefetches += info[i].prefetch_in_loop;
4089 if (info[i].write)
4090 num_real_write_prefetches += info[i].prefetch_in_loop;
4091 }
4092 }
4093
4094 /* Determine how many iterations ahead to prefetch within the loop, based
4095 on how many prefetches we currently expect to do within the loop. */
4096 if (num_real_prefetches != 0)
4097 {
4098 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4099 {
4100 if (loop_dump_stream)
4101 fprintf (loop_dump_stream,
4102 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4103 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4104 num_real_prefetches = 0, num_real_write_prefetches = 0;
4105 }
4106 }
4107 /* We'll also use AHEAD to determine how many prefetch instructions to
4108 emit before a loop, so don't leave it zero. */
4109 if (ahead == 0)
4110 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
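  /* A worked example of the AHEAD computation (made-up numbers): if the
     target sustains 6 simultaneous prefetches and the loop needs 2
     prefetch insns per iteration, we fetch data 6 / 2 = 3 iterations
     ahead; had it needed 8, 6 / 8 == 0 and in-loop prefetching would
     have been abandoned above.  */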
4111
4112 for (i = 0; i < num_prefetches; i++)
4113 {
4114 /* Update if we've decided not to prefetch anything within the loop. */
4115 if (num_real_prefetches == 0)
4116 info[i].prefetch_in_loop = 0;
4117
4118 /* Find how many prefetch instructions we'll use before the loop. */
4119 if (info[i].prefetch_before_loop != 0)
4120 {
4121 int n = info[i].total_bytes / PREFETCH_BLOCK;
4122 if (n > ahead)
4123 n = ahead;
4124 info[i].prefetch_before_loop = n;
4125 num_prefetches_before += n;
4126 if (info[i].write)
4127 num_write_prefetches_before += n;
4128 }
4129
4130 if (loop_dump_stream)
4131 {
4132 if (info[i].prefetch_in_loop == 0
4133 && info[i].prefetch_before_loop == 0)
4134 continue;
4135 fprintf (loop_dump_stream, "Prefetch insn: %d",
4136 INSN_UID (info[i].giv->insn));
4137 fprintf (loop_dump_stream,
4138 "; in loop: %d; before: %d; %s\n",
4139 info[i].prefetch_in_loop,
4140 info[i].prefetch_before_loop,
4141 info[i].write ? "read/write" : "read only");
4142 fprintf (loop_dump_stream,
4143 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4144 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4145 info[i].bytes_accessed, info[i].total_bytes);
4146 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4147 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4148 info[i].index, info[i].stride);
4149 print_rtl (loop_dump_stream, info[i].base_address);
4150 fprintf (loop_dump_stream, "\n");
4151 }
4152 }
4153
4154 if (num_real_prefetches + num_prefetches_before > 0)
4155 {
4156 /* Record that this loop uses prefetch instructions. */
4157 LOOP_INFO (loop)->has_prefetch = 1;
4158
4159 if (loop_dump_stream)
4160 {
4161 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4162 num_real_prefetches, num_real_write_prefetches);
4163 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4164 num_prefetches_before, num_write_prefetches_before);
4165 }
4166 }
4167
4168 for (i = 0; i < num_prefetches; i++)
4169 {
4170 int y;
4171
4172 for (y = 0; y < info[i].prefetch_in_loop; y++)
4173 {
4174 rtx loc = copy_rtx (*info[i].giv->location);
4175 rtx insn;
4176 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4177 rtx before_insn = info[i].giv->insn;
4178 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4179 rtx seq;
4180
4181 /* We can save some effort by offsetting the address on
4182 architectures with offsettable memory references. */
4183 if (offsettable_address_p (0, VOIDmode, loc))
4184 loc = plus_constant (loc, bytes_ahead);
4185 else
4186 {
4187 rtx reg = gen_reg_rtx (Pmode);
4188 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4189 GEN_INT (bytes_ahead), reg,
4190 0, before_insn);
4191 loc = reg;
4192 }
4193
4194 start_sequence ();
4195 /* Make sure the address operand is valid for prefetch. */
4196 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4197 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4198 loc = force_reg (Pmode, loc);
4199 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4200 GEN_INT (3)));
4201 seq = get_insns ();
4202 end_sequence ();
4203 emit_insn_before (seq, before_insn);
4204
4205 /* Check all insns emitted and record the new GIV
4206 information. */
4207 insn = NEXT_INSN (prev_insn);
4208 while (insn != before_insn)
4209 {
4210 insn = check_insn_for_givs (loop, insn,
4211 info[i].giv->always_executed,
4212 info[i].giv->maybe_multiple);
4213 insn = NEXT_INSN (insn);
4214 }
4215 }
4216
4217 if (PREFETCH_BEFORE_LOOP)
4218 {
4219 /* Emit insns before the loop to fetch the first cache lines or,
4220 if we're not prefetching within the loop, everything we expect
4221 to need. */
4222 for (y = 0; y < info[i].prefetch_before_loop; y++)
4223 {
4224 rtx reg = gen_reg_rtx (Pmode);
4225 rtx loop_start = loop->start;
4226 rtx init_val = info[i].class->initial_value;
4227 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4228 info[i].giv->add_val,
4229 GEN_INT (y * PREFETCH_BLOCK));
4230
4231 /* Functions called by loop_iv_add_mult_emit_before expect a
4232 non-constant INIT_VAL to have the same mode as REG, which
4233 in this case we know to be Pmode. */
4234 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4235 {
4236 rtx seq;
4237
4238 start_sequence ();
4239 init_val = convert_to_mode (Pmode, init_val, 0);
4240 seq = get_insns ();
4241 end_sequence ();
4242 loop_insn_emit_before (loop, 0, loop_start, seq);
4243 }
4244 loop_iv_add_mult_emit_before (loop, init_val,
4245 info[i].giv->mult_val,
4246 add_val, reg, 0, loop_start);
4247 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4248 GEN_INT (3)),
4249 loop_start);
4250 }
4251 }
4252 }
4253
4254 return;
4255 }
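/* As an illustration of the in-loop emission above (made-up numbers):
   with a 32-byte PREFETCH_BLOCK, AHEAD == 2 and prefetch_in_loop == 2,
   two prefetches are emitted per iteration, for LOC + 64 and LOC + 96,
   since bytes_ahead = PREFETCH_BLOCK * (ahead + y) for y = 0 and 1.  */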
4256 \f
4257 /* Communication with routines called via `note_stores'. */
4258
4259 static rtx note_insn;
4260
4261 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4262
4263 static rtx addr_placeholder;
4264
4265 /* ??? Unfinished optimizations, and possible future optimizations,
4266 for the strength reduction code. */
4267
4268 /* ??? The interaction of biv elimination, and recognition of 'constant'
4269 bivs, may cause problems. */
4270
4271 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4272 performance problems.
4273
4274 Perhaps don't eliminate things that can be combined with an addressing
4275 mode. Find all givs that have the same biv, mult_val, and add_val;
4276 then for each giv, check to see if its only use dies in a following
4277 memory address. If so, generate a new memory address and check to see
4278 if it is valid. If it is valid, then store the modified memory address,
4279 otherwise, mark the giv as not done so that it will get its own iv. */
4280
4281 /* ??? Could try to optimize branches when it is known that a biv is always
4282 positive. */
4283
4284 /* ??? When replacing a biv in a compare insn, we should replace it with
4285 the closest giv so that an optimized branch can still be recognized by
4286 the combiner, e.g. the VAX acb insn. */
4287
4288 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4289 was rerun in loop_optimize whenever a register was added or moved.
4290 Also, some of the optimizations could be a little less conservative. */
4291 \f
4292 /* Scan the loop body and call FNCALL for each insn. In addition to the
4293 LOOP and INSN parameters, MAYBE_MULTIPLE and NOT_EVERY_ITERATION are
4294 passed to the callback.
4295
4296 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed
4297 at least once for every loop iteration except for the last one.
4298
4299 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once
4300 for every loop iteration. */
4302 void
4303 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4304 {
4305 int not_every_iteration = 0;
4306 int maybe_multiple = 0;
4307 int past_loop_latch = 0;
4308 int loop_depth = 0;
4309 rtx p;
4310
4311 /* If loop_scan_start points to the loop exit test, we have to be wary of
4312 subversive use of gotos inside expression statements. */
4313 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4314 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4315
4316 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4317 for (p = next_insn_in_loop (loop, loop->scan_start);
4318 p != NULL_RTX;
4319 p = next_insn_in_loop (loop, p))
4320 {
4321 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4322
4323 /* Past a CODE_LABEL, we get to insns that may be executed multiple
4324 times. The only way we can be sure that they can't is if every
4325 jump insn between here and the end of the loop either
4326 returns, exits the loop, is a jump to a location that is still
4327 behind the label, or is a jump to the loop start. */
4328
4329 if (GET_CODE (p) == CODE_LABEL)
4330 {
4331 rtx insn = p;
4332
4333 maybe_multiple = 0;
4334
4335 while (1)
4336 {
4337 insn = NEXT_INSN (insn);
4338 if (insn == loop->scan_start)
4339 break;
4340 if (insn == loop->end)
4341 {
4342 if (loop->top != 0)
4343 insn = loop->top;
4344 else
4345 break;
4346 if (insn == loop->scan_start)
4347 break;
4348 }
4349
4350 if (GET_CODE (insn) == JUMP_INSN
4351 && GET_CODE (PATTERN (insn)) != RETURN
4352 && (!any_condjump_p (insn)
4353 || (JUMP_LABEL (insn) != 0
4354 && JUMP_LABEL (insn) != loop->scan_start
4355 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4356 {
4357 maybe_multiple = 1;
4358 break;
4359 }
4360 }
4361 }
4362
4363 /* Past a jump, we get to insns for which we can't count
4364 on whether they will be executed during each iteration. */
4365 /* Similar code also appears in scan_loop. */
4367 if (GET_CODE (p) == JUMP_INSN
4368 /* If we enter the loop in the middle, and scan around to the
4369 beginning, don't set not_every_iteration for that.
4370 This can be any kind of jump, since we want to know if insns
4371 will be executed if the loop is executed. */
4372 && !(JUMP_LABEL (p) == loop->top
4373 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4374 && any_uncondjump_p (p))
4375 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4376 {
4377 rtx label = 0;
4378
4379 /* If this is a jump outside the loop, then it also doesn't
4380 matter. Check to see if the target of this branch is on the
4381 loop->exit_labels list. */
4382
4383 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4384 if (XEXP (label, 0) == JUMP_LABEL (p))
4385 break;
4386
4387 if (!label)
4388 not_every_iteration = 1;
4389 }
4390
4391 else if (GET_CODE (p) == NOTE)
4392 {
4393 /* At the virtual top of a converted loop, insns are again known to
4394 be executed each iteration: logically, the loop begins here
4395 even though the exit code has been duplicated.
4396
4397 Insns are also again known to be executed each iteration at
4398 the LOOP_CONT note. */
4399 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4400 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4401 && loop_depth == 0)
4402 not_every_iteration = 0;
4403 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4404 loop_depth++;
4405 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4406 loop_depth--;
4407 }
4408
4409 /* Note if we pass a loop latch. If we do, then we cannot clear
4410 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4411 a loop since a jump before the last CODE_LABEL may have started
4412 a new loop iteration.
4413
4414 Note that LOOP_TOP is only set for rotated loops and we need
4415 this check for all loops, so compare against the CODE_LABEL
4416 which immediately follows LOOP_START. */
4417 if (GET_CODE (p) == JUMP_INSN
4418 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4419 past_loop_latch = 1;
4420
4421 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4422 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4423 or not an insn is known to be executed each iteration of the
4424 loop, whether or not any iterations are known to occur.
4425
4426 Therefore, if we have just passed a label and have no more labels
4427 between here and the test insn of the loop, and we have not passed
4428 a jump to the top of the loop, then we know these insns will be
4429 executed each iteration. */
4430
4431 if (not_every_iteration
4432 && !past_loop_latch
4433 && GET_CODE (p) == CODE_LABEL
4434 && no_labels_between_p (p, loop->end)
4435 && loop_insn_first_p (p, loop->cont))
4436 not_every_iteration = 0;
4437 }
4438 }
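/* A minimal sketch of a conforming callback (illustrative only; the
   real callbacks passed to for_each_insn_in_loop are check_insn_for_bivs
   and check_insn_for_givs).  The callback returns the insn at which the
   walk should resume, so it may skip over insns it emits itself.  */
#if 0
static rtx
example_callback (struct loop *loop ATTRIBUTE_UNUSED, rtx insn,
		  int not_every_iteration, int maybe_multiple)
{
  if (INSN_P (insn) && !not_every_iteration && !maybe_multiple)
    {
      /* INSN is known to be executed exactly once per iteration;
	 examine it here.  */
    }
  return insn;
}
#endif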
4439 \f
4440 static void
4441 loop_bivs_find (struct loop *loop)
4442 {
4443 struct loop_regs *regs = LOOP_REGS (loop);
4444 struct loop_ivs *ivs = LOOP_IVS (loop);
4445 /* Temporary list pointers for traversing ivs->list. */
4446 struct iv_class *bl, **backbl;
4447
4448 ivs->list = 0;
4449
4450 for_each_insn_in_loop (loop, check_insn_for_bivs);
4451
4452 /* Scan ivs->list to remove all regs that proved not to be bivs.
4453 Make a sanity check against regs->n_times_set. */
4454 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4455 {
4456 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4457 /* The above happens if the register was modified via a SUBREG, etc. */
4458 /* Make sure it is not recognized as a basic induction var: */
4459 || regs->array[bl->regno].n_times_set != bl->biv_count
4460 /* If never incremented, it is an invariant that we decided not to
4461 move. So leave it alone. */
4462 || ! bl->incremented)
4463 {
4464 if (loop_dump_stream)
4465 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4466 bl->regno,
4467 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4468 ? "not induction variable"
4469 : (! bl->incremented ? "never incremented"
4470 : "count error")));
4471
4472 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4473 *backbl = bl->next;
4474 }
4475 else
4476 {
4477 backbl = &bl->next;
4478
4479 if (loop_dump_stream)
4480 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4481 }
4482 }
4483 }
4484
4485
4486 /* Determine how BIVS are initialized by looking through the pre-header
4487 extended basic block. */
4488 static void
4489 loop_bivs_init_find (struct loop *loop)
4490 {
4491 struct loop_ivs *ivs = LOOP_IVS (loop);
4492 /* Temporary list pointers for traversing ivs->list. */
4493 struct iv_class *bl;
4494 int call_seen;
4495 rtx p;
4496
4497 /* Find the initial value for each biv by searching backwards from
4498 loop->start, halting at the first label. Also record any test condition. */
4499
4500 call_seen = 0;
4501 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4502 {
4503 rtx test;
4504
4505 note_insn = p;
4506
4507 if (GET_CODE (p) == CALL_INSN)
4508 call_seen = 1;
4509
4510 if (INSN_P (p))
4511 note_stores (PATTERN (p), record_initial, ivs);
4512
4513 /* Record any test of a biv that branches around the loop if there is no
4514 store between it and the start of the loop. We only care about tests
4515 with constants and registers, and only certain of those. */
4516 if (GET_CODE (p) == JUMP_INSN
4517 && JUMP_LABEL (p) != 0
4518 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4519 && (test = get_condition_for_loop (loop, p)) != 0
4520 && REG_P (XEXP (test, 0))
4521 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4522 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4523 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4524 && bl->init_insn == 0)
4525 {
4526 /* If an NE test, we have an initial value! */
4527 if (GET_CODE (test) == NE)
4528 {
4529 bl->init_insn = p;
4530 bl->init_set = gen_rtx_SET (VOIDmode,
4531 XEXP (test, 0), XEXP (test, 1));
4532 }
4533 else
4534 bl->initial_test = test;
4535 }
4536 }
4537 }
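/* As an example of the NE test above: if the pre-header ends with

	if (x != 10) goto past_loop;

   then falling through into the loop guarantees x == 10, so we have an
   initial value and record the equivalent of
   (set (reg x) (const_int 10)) as bl->init_set.  */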
4538
4539
4540 /* Look at each biv and see if we can say anything better about its
4541 initial value from any initializing insns set up above. (This is done
4542 in two passes to avoid missing SETs in a PARALLEL.) */
4543 static void
4544 loop_bivs_check (struct loop *loop)
4545 {
4546 struct loop_ivs *ivs = LOOP_IVS (loop);
4547 /* Temporary list pointers for traversing ivs->list. */
4548 struct iv_class *bl;
4549 struct iv_class **backbl;
4550
4551 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4552 {
4553 rtx src;
4554 rtx note;
4555
4556 if (! bl->init_insn)
4557 continue;
4558
4559 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4560 is a constant, use the value of that. */
4561 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4562 && CONSTANT_P (XEXP (note, 0)))
4563 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4564 && CONSTANT_P (XEXP (note, 0))))
4565 src = XEXP (note, 0);
4566 else
4567 src = SET_SRC (bl->init_set);
4568
4569 if (loop_dump_stream)
4570 fprintf (loop_dump_stream,
4571 "Biv %d: initialized at insn %d: initial value ",
4572 bl->regno, INSN_UID (bl->init_insn));
4573
4574 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4575 || GET_MODE (src) == VOIDmode)
4576 && valid_initial_value_p (src, bl->init_insn,
4577 LOOP_INFO (loop)->pre_header_has_call,
4578 loop->start))
4579 {
4580 bl->initial_value = src;
4581
4582 if (loop_dump_stream)
4583 {
4584 print_simple_rtl (loop_dump_stream, src);
4585 fputc ('\n', loop_dump_stream);
4586 }
4587 }
4588 /* If we can't make it a giv,
4589 let the biv keep its initial value of "itself". */
4590 else if (loop_dump_stream)
4591 fprintf (loop_dump_stream, "is complex\n");
4592 }
4593 }
4594
4595
4596 /* Search the loop for general induction variables. */
4597
4598 static void
4599 loop_givs_find (struct loop* loop)
4600 {
4601 for_each_insn_in_loop (loop, check_insn_for_givs);
4602 }
4603
4604
4605 /* For each giv for which we still don't know whether or not it is
4606 replaceable, check to see if it is replaceable because its final value
4607 can be calculated. */
4608
4609 static void
4610 loop_givs_check (struct loop *loop)
4611 {
4612 struct loop_ivs *ivs = LOOP_IVS (loop);
4613 struct iv_class *bl;
4614
4615 for (bl = ivs->list; bl; bl = bl->next)
4616 {
4617 struct induction *v;
4618
4619 for (v = bl->giv; v; v = v->next_iv)
4620 if (! v->replaceable && ! v->not_replaceable)
4621 check_final_value (loop, v);
4622 }
4623 }
4624
4625
4626 /* Return nonzero if it is possible to eliminate the biv BL provided
4627 all givs are reduced. This is possible if either the reg is not
4628 used outside the loop, or we can compute what its final value will
4629 be. */
4630
4631 static int
4632 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4633 int threshold, int insn_count)
4634 {
4635 /* For architectures with a decrement_and_branch_until_zero insn,
4636 don't do this if we put a REG_NONNEG note on the endtest for this
4637 biv. */
4638
4639 #ifdef HAVE_decrement_and_branch_until_zero
4640 if (bl->nonneg)
4641 {
4642 if (loop_dump_stream)
4643 fprintf (loop_dump_stream,
4644 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4645 return 0;
4646 }
4647 #endif
4648
4649 /* Check that the biv is used outside the loop or has a final value.
4650 Compare against bl->init_insn rather than loop->start. We aren't
4651 concerned with any uses of the biv between init_insn and
4652 loop->start since these won't be affected by the value of the biv
4653 elsewhere in the function, so long as init_insn doesn't use the
4654 biv itself. */
4655
4656 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4657 && bl->init_insn
4658 && INSN_UID (bl->init_insn) < max_uid_for_loop
4659 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4660 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4661 || (bl->final_value = final_biv_value (loop, bl)))
4662 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4663
4664 if (loop_dump_stream)
4665 {
4666 fprintf (loop_dump_stream,
4667 "Cannot eliminate biv %d.\n",
4668 bl->regno);
4669 fprintf (loop_dump_stream,
4670 "First use: insn %d, last use: insn %d.\n",
4671 REGNO_FIRST_UID (bl->regno),
4672 REGNO_LAST_UID (bl->regno));
4673 }
4674 return 0;
4675 }
4676
4677
4678 /* Reduce each giv of BL that we have decided to reduce. */
4679
4680 static void
4681 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
4682 {
4683 struct induction *v;
4684
4685 for (v = bl->giv; v; v = v->next_iv)
4686 {
4687 struct induction *tv;
4688 if (! v->ignore && v->same == 0)
4689 {
4690 int auto_inc_opt = 0;
4691
4692 /* If the code for derived givs immediately below has already
4693 allocated a new_reg, we must keep it. */
4694 if (! v->new_reg)
4695 v->new_reg = gen_reg_rtx (v->mode);
4696
4697 #ifdef AUTO_INC_DEC
4698 /* If the target has auto-increment addressing modes, and
4699 this is an address giv, then try to put the increment
4700 immediately after its use, so that flow can create an
4701 auto-increment addressing mode. */
4702 /* Don't do this for loops entered at the bottom, to avoid
4703 this invalid transformation:
4704 jmp L; -> jmp L;
4705 TOP: TOP:
4706 use giv use giv
4707 L: inc giv
4708 inc biv L:
4709 test biv test giv
4710 cbr TOP cbr TOP
4711 */
4712 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4713 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4714 /* We don't handle reversed biv's because bl->biv->insn
4715 does not have a valid INSN_LUID. */
4716 && ! bl->reversed
4717 && v->always_executed && ! v->maybe_multiple
4718 && INSN_UID (v->insn) < max_uid_for_loop
4719 && !loop->top)
4720 {
4721 /* If other giv's have been combined with this one, then
4722 this will work only if all uses of the other giv's occur
4723 before this giv's insn. This is difficult to check.
4724
4725 We simplify this by looking for the common case where
4726 there is one DEST_REG giv, and this giv's insn is the
4727 last use of the dest_reg of that DEST_REG giv. If the
4728 increment occurs after the address giv, then we can
4729 perform the optimization. (Otherwise, the increment
4730 would have to go before other_giv, and we would not be
4731 able to combine it with the address giv to get an
4732 auto-inc address.) */
4733 if (v->combined_with)
4734 {
4735 struct induction *other_giv = 0;
4736
4737 for (tv = bl->giv; tv; tv = tv->next_iv)
4738 if (tv->same == v)
4739 {
4740 if (other_giv)
4741 break;
4742 else
4743 other_giv = tv;
4744 }
4745 if (! tv && other_giv
4746 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4747 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4748 == INSN_UID (v->insn))
4749 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4750 auto_inc_opt = 1;
4751 }
4752 /* Check for case where increment is before the address
4753 giv. Do this test in "loop order". */
4754 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4755 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4756 || (INSN_LUID (bl->biv->insn)
4757 > INSN_LUID (loop->scan_start))))
4758 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4759 && (INSN_LUID (loop->scan_start)
4760 < INSN_LUID (bl->biv->insn))))
4761 auto_inc_opt = -1;
4762 else
4763 auto_inc_opt = 1;
4764
4765 #ifdef HAVE_cc0
4766 {
4767 rtx prev;
4768
4769 /* We can't put an insn immediately after one setting
4770 cc0, or immediately before one using cc0. */
4771 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4772 || (auto_inc_opt == -1
4773 && (prev = prev_nonnote_insn (v->insn)) != 0
4774 && INSN_P (prev)
4775 && sets_cc0_p (PATTERN (prev))))
4776 auto_inc_opt = 0;
4777 }
4778 #endif
4779
4780 if (auto_inc_opt)
4781 v->auto_inc_opt = 1;
4782 }
4783 #endif
4784
4785 /* For each place where the biv is incremented, add an insn
4786 to increment the new, reduced reg for the giv. */
4787 for (tv = bl->biv; tv; tv = tv->next_iv)
4788 {
4789 rtx insert_before;
4790
4791 /* Skip if location is the same as a previous one. */
4792 if (tv->same)
4793 continue;
4794 if (! auto_inc_opt)
4795 insert_before = NEXT_INSN (tv->insn);
4796 else if (auto_inc_opt == 1)
4797 insert_before = NEXT_INSN (v->insn);
4798 else
4799 insert_before = v->insn;
4800
4801 if (tv->mult_val == const1_rtx)
4802 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4803 v->new_reg, v->new_reg,
4804 0, insert_before);
4805 else /* tv->mult_val == const0_rtx */
4806 /* A multiply is acceptable here
4807 since this is presumed to be seldom executed. */
4808 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4809 v->add_val, v->new_reg,
4810 0, insert_before);
4811 }
4812
4813 /* Add code at loop start to initialize giv's reduced reg. */
4814
4815 loop_iv_add_mult_hoist (loop,
4816 extend_value_for_giv (v, bl->initial_value),
4817 v->mult_val, v->add_val, v->new_reg);
4818 }
4819 }
4820 }
4821
4822
4823 /* Check for givs whose first use is their definition and whose
4824 last use is the definition of another giv. If so, it is likely
4825 dead and should not be used to derive another giv nor to
4826 eliminate a biv. */
4827
4828 static void
4829 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
4830 {
4831 struct induction *v;
4832
4833 for (v = bl->giv; v; v = v->next_iv)
4834 {
4835 if (v->ignore
4836 || (v->same && v->same->ignore))
4837 continue;
4838
4839 if (v->giv_type == DEST_REG
4840 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4841 {
4842 struct induction *v1;
4843
4844 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4845 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4846 v->maybe_dead = 1;
4847 }
4848 }
4849 }
4850
4851
4852 static void
4853 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
4854 {
4855 struct induction *v;
4856
4857 for (v = bl->giv; v; v = v->next_iv)
4858 {
4859 if (v->same && v->same->ignore)
4860 v->ignore = 1;
4861
4862 if (v->ignore)
4863 continue;
4864
4865 /* Update expression if this was combined, in case other giv was
4866 replaced. */
4867 if (v->same)
4868 v->new_reg = replace_rtx (v->new_reg,
4869 v->same->dest_reg, v->same->new_reg);
4870
4871 /* See if this register is known to be a pointer to something. If
4872 so, see if we can find the alignment. First see if there is a
4873 destination register that is a pointer. If so, this shares the
4874 alignment too. Next see if we can deduce anything from the
4875 computational information. If not, and this is a DEST_ADDR
4876 giv, at least we know that it's a pointer, though we don't know
4877 the alignment. */
4878 if (REG_P (v->new_reg)
4879 && v->giv_type == DEST_REG
4880 && REG_POINTER (v->dest_reg))
4881 mark_reg_pointer (v->new_reg,
4882 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4883 else if (REG_P (v->new_reg)
4884 && REG_POINTER (v->src_reg))
4885 {
4886 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4887
4888 if (align == 0
4889 || GET_CODE (v->add_val) != CONST_INT
4890 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4891 align = 0;
4892
4893 mark_reg_pointer (v->new_reg, align);
4894 }
4895 else if (REG_P (v->new_reg)
4896 && REG_P (v->add_val)
4897 && REG_POINTER (v->add_val))
4898 {
4899 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4900
4901 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4902 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4903 align = 0;
4904
4905 mark_reg_pointer (v->new_reg, align);
4906 }
4907 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
4908 mark_reg_pointer (v->new_reg, 0);
4909
4910 if (v->giv_type == DEST_ADDR)
4911 /* Store reduced reg as the address in the memref where we found
4912 this giv. */
4913 validate_change (v->insn, v->location, v->new_reg, 0);
4914 else if (v->replaceable)
4915 {
4916 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4917 }
4918 else
4919 {
4920 rtx original_insn = v->insn;
4921 rtx note;
4922
4923 /* Not replaceable; emit an insn to set the original giv reg from
4924 the reduced giv, same as above. */
4925 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4926 gen_move_insn (v->dest_reg,
4927 v->new_reg));
4928
4929 /* The original insn may have a REG_EQUAL note. This note is
4930 now incorrect and may result in invalid substitutions later.
4931 The original insn is dead, but may be part of a libcall
4932 sequence, which doesn't seem worth the bother of handling. */
4933 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4934 if (note)
4935 remove_note (original_insn, note);
4936 }
4937
4938 /* When a loop is reversed, givs which depend on the reversed
4939 biv, and which are live outside the loop, must be set to their
4940 correct final value. This insn is only needed if the giv is
4941 not replaceable. The correct final value is the same as the
4942 value that the giv starts the reversed loop with. */
4943 if (bl->reversed && ! v->replaceable)
4944 loop_iv_add_mult_sink (loop,
4945 extend_value_for_giv (v, bl->initial_value),
4946 v->mult_val, v->add_val, v->dest_reg);
4947 else if (v->final_value)
4948 loop_insn_sink_or_swim (loop,
4949 gen_load_of_final_value (v->dest_reg,
4950 v->final_value));
4951
4952 if (loop_dump_stream)
4953 {
4954 fprintf (loop_dump_stream, "giv at %d reduced to ",
4955 INSN_UID (v->insn));
4956 print_simple_rtl (loop_dump_stream, v->new_reg);
4957 fprintf (loop_dump_stream, "\n");
4958 }
4959 }
4960 }
4961
4962
4963 static int
4964 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4965 struct iv_class *bl, struct induction *v,
4966 rtx test_reg)
4967 {
4968 int add_cost;
4969 int benefit;
4970
4971 benefit = v->benefit;
4972 PUT_MODE (test_reg, v->mode);
4973 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4974 test_reg, test_reg);
4975
4976 /* Reduce benefit if not replaceable, since we will insert a
4977 move-insn to replace the insn that calculates this giv. Don't do
4978 this unless the giv is a user variable, since it will often be
4979 marked non-replaceable because of the duplication of the exit
4980 code outside the loop. In such a case, the copies we insert are
4981 dead and will be deleted. So they don't have a cost. Similar
4982 situations exist. */
4983 /* ??? The new final_[bg]iv_value code does a much better job of
4984 finding replaceable giv's, and hence this code may no longer be
4985 necessary. */
4986 if (! v->replaceable && ! bl->eliminable
4987 && REG_USERVAR_P (v->dest_reg))
4988 benefit -= copy_cost;
4989
4990 /* Decrease the benefit to count the add-insns that we will insert
4991 to increment the reduced reg for the giv. ??? This can
4992 overestimate the run-time cost of the additional insns, e.g. if
4993 there are multiple basic blocks that increment the biv, but only
4994 one of these blocks is executed during each iteration. There is
4995 no good way to detect cases like this with the current structure
4996 of the loop optimizer. This code is more accurate for
4997 determining code size than run-time benefits. */
4998 benefit -= add_cost * bl->biv_count;
4999
5000 /* Decide whether to strength-reduce this giv or to leave the code
5001 unchanged (recompute it from the biv each time it is used). This
5002 decision can be made independently for each giv. */
5003
5004 #ifdef AUTO_INC_DEC
5005 /* Attempt to guess whether autoincrement will handle some of the
5006 new add insns; if so, increase BENEFIT (undo the subtraction of
5007 add_cost that was done above). */
5008 if (v->giv_type == DEST_ADDR
5009 /* Increasing the benefit is risky, since this is only a guess.
5010 Avoid increasing register pressure in cases where there would
5011 be no other benefit from reducing this giv. */
5012 && benefit > 0
5013 && GET_CODE (v->mult_val) == CONST_INT)
5014 {
5015 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5016
5017 if (HAVE_POST_INCREMENT
5018 && INTVAL (v->mult_val) == size)
5019 benefit += add_cost * bl->biv_count;
5020 else if (HAVE_PRE_INCREMENT
5021 && INTVAL (v->mult_val) == size)
5022 benefit += add_cost * bl->biv_count;
5023 else if (HAVE_POST_DECREMENT
5024 && -INTVAL (v->mult_val) == size)
5025 benefit += add_cost * bl->biv_count;
5026 else if (HAVE_PRE_DECREMENT
5027 && -INTVAL (v->mult_val) == size)
5028 benefit += add_cost * bl->biv_count;
5029 }
5030 #endif
5031
5032 return benefit;
5033 }
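/* A worked example with made-up costs: a giv whose benefit is 10, whose
   class has biv_count == 2, and whose add/mult sequence costs 3 ends up
   with 10 - 2 * 3 = 4.  If it is a DEST_ADDR giv whose mult_val equals
   the access size on a target with post-increment addressing, the
   2 * 3 = 6 is added back, restoring the benefit to 10.  */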
5034
5035
5036 /* Free IV structures for LOOP. */
5037
5038 static void
5039 loop_ivs_free (struct loop *loop)
5040 {
5041 struct loop_ivs *ivs = LOOP_IVS (loop);
5042 struct iv_class *iv = ivs->list;
5043
5044 free (ivs->regs);
5045
5046 while (iv)
5047 {
5048 struct iv_class *next = iv->next;
5049 struct induction *induction;
5050 struct induction *next_induction;
5051
5052 for (induction = iv->biv; induction; induction = next_induction)
5053 {
5054 next_induction = induction->next_iv;
5055 free (induction);
5056 }
5057 for (induction = iv->giv; induction; induction = next_induction)
5058 {
5059 next_induction = induction->next_iv;
5060 free (induction);
5061 }
5062
5063 free (iv);
5064 iv = next;
5065 }
5066 }
5067
5068
5069 /* Perform strength reduction and induction variable elimination.
5070
5071 Pseudo registers created during this function will be beyond the
5072 last valid index in several tables including
5073 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5074 problem here, because the added registers cannot be givs outside of
5075 their loop, and hence will never be reconsidered. But scan_loop
5076 must check regnos to make sure they are in bounds. */
5077
5078 static void
5079 strength_reduce (struct loop *loop, int flags)
5080 {
5081 struct loop_info *loop_info = LOOP_INFO (loop);
5082 struct loop_regs *regs = LOOP_REGS (loop);
5083 struct loop_ivs *ivs = LOOP_IVS (loop);
5084 rtx p;
5085 /* Temporary list pointer for traversing ivs->list. */
5086 struct iv_class *bl;
5087 /* Ratio of extra register life span we can justify
5088 for saving an instruction. More if the loop doesn't call subroutines,
5089 since in that case saving an insn makes more of a difference
5090 and more registers are available. */
5091 /* ??? Could set this to the last value of threshold in move_movables. */
5092 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5093 /* Map of pseudo-register replacements. */
5094 rtx *reg_map = NULL;
5095 int reg_map_size;
5096 int unrolled_insn_copies = 0;
5097 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5098 int insn_count = count_insns_in_loop (loop);
5099
5100 addr_placeholder = gen_reg_rtx (Pmode);
5101
5102 ivs->n_regs = max_reg_before_loop;
5103 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
5104
5105 /* Find all BIVs in loop. */
5106 loop_bivs_find (loop);
5107
5108 /* Exit if there are no bivs. */
5109 if (! ivs->list)
5110 {
5111 /* We can still unroll the loop anyway, but indicate that there is no
5112 strength reduction info available. */
5113 if (flags & LOOP_UNROLL)
5114 unroll_loop (loop, insn_count, 0);
5115
5116 loop_ivs_free (loop);
5117 return;
5118 }
5119
5120 /* Determine how BIVS are initialized by looking through pre-header
5121 extended basic block. */
5122 loop_bivs_init_find (loop);
5123
5124 /* Look at each biv and see if we can say anything better about its
5125 initial value from any initializing insns set up above. */
5126 loop_bivs_check (loop);
5127
5128 /* Search the loop for general induction variables. */
5129 loop_givs_find (loop);
5130
5131 /* Try to calculate and save the number of loop iterations. This is
5132 set to zero if the actual number cannot be calculated. This must
5133 be called after all giv's have been identified, since otherwise it may
5134 fail if the iteration variable is a giv. */
5135 loop_iterations (loop);
5136
5137 #ifdef HAVE_prefetch
5138 if (flags & LOOP_PREFETCH)
5139 emit_prefetch_instructions (loop);
5140 #endif
5141
5142 /* Now for each giv for which we still don't know whether or not it is
5143 replaceable, check to see if it is replaceable because its final value
5144 can be calculated. This must be done after loop_iterations is called,
5145 so that final_giv_value will work correctly. */
5146 loop_givs_check (loop);
5147
5148 /* Try to prove that the loop counter variable (if any) is always
5149 nonnegative; if so, record that fact with a REG_NONNEG note
5150 so that "decrement and branch until zero" insn can be used. */
5151 check_dbra_loop (loop, insn_count);
5152
5153 /* Create reg_map to hold substitutions for replaceable giv regs.
5154 Some givs might have been made from biv increments, so look at
5155 ivs->reg_iv_type for a suitable size. */
5156 reg_map_size = ivs->n_regs;
5157 reg_map = xcalloc (reg_map_size, sizeof (rtx));
5158
5159 /* Examine each iv class for feasibility of strength reduction/induction
5160 variable elimination. */
5161
5162 for (bl = ivs->list; bl; bl = bl->next)
5163 {
5164 struct induction *v;
5165 int benefit;
5166
5167 /* Test whether it will be possible to eliminate this biv
5168 provided all givs are reduced. */
5169 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5170
5171 /* This will be true at the end, if all givs which depend on this
5172 biv have been strength reduced.
5173 We can't (currently) eliminate the biv unless this is so. */
5174 bl->all_reduced = 1;
5175
5176 /* Check each extension dependent giv in this class to see if its
5177 root biv is safe from wrapping in the interior mode. */
5178 check_ext_dependent_givs (loop, bl);
5179
5180 /* Combine all giv's for this iv_class. */
5181 combine_givs (regs, bl);
5182
5183 for (v = bl->giv; v; v = v->next_iv)
5184 {
5185 struct induction *tv;
5186
5187 if (v->ignore || v->same)
5188 continue;
5189
5190 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5191
5192 /* If an insn is not to be strength reduced, then set its ignore
5193 flag, and clear bl->all_reduced. */
5194
5195 /* A giv that depends on a reversed biv must be reduced if it is
5196 used after the loop exit; otherwise, it would have the wrong
5197 value after the loop exit. To keep it simple, just reduce all
5198 such giv's whether or not we know they are used after the loop
5199 exit. */
5200
5201 if (! flag_reduce_all_givs
5202 && v->lifetime * threshold * benefit < insn_count
5203 && ! bl->reversed)
5204 {
5205 if (loop_dump_stream)
5206 fprintf (loop_dump_stream,
5207 "giv of insn %d not worth while, %d vs %d.\n",
5208 INSN_UID (v->insn),
5209 v->lifetime * threshold * benefit, insn_count);
5210 v->ignore = 1;
5211 bl->all_reduced = 0;
5212 }
5213 else
5214 {
5215 /* Check that we can increment the reduced giv without a
5216 multiply insn. If not, reject it. */
5217
5218 for (tv = bl->biv; tv; tv = tv->next_iv)
5219 if (tv->mult_val == const1_rtx
5220 && ! product_cheap_p (tv->add_val, v->mult_val))
5221 {
5222 if (loop_dump_stream)
5223 fprintf (loop_dump_stream,
5224 "giv of insn %d: would need a multiply.\n",
5225 INSN_UID (v->insn));
5226 v->ignore = 1;
5227 bl->all_reduced = 0;
5228 break;
5229 }
5230 }
5231 }
5232
5233 /* Check for givs whose first use is their definition and whose
5234 last use is the definition of another giv. If so, it is likely
5235 dead and should not be used to derive another giv nor to
5236 eliminate a biv. */
5237 loop_givs_dead_check (loop, bl);
5238
5239 /* Reduce each giv that we decided to reduce. */
5240 loop_givs_reduce (loop, bl);
5241
5242 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5243 as not reduced.
5244
5245 For each giv register that can be reduced now: if replaceable,
5246 substitute reduced reg wherever the old giv occurs;
5247 else add new move insn "giv_reg = reduced_reg". */
5248 loop_givs_rescan (loop, bl, reg_map);
5249
5250 /* All the givs based on the biv bl have been reduced if they
5251 merit it. */
5252
5253 /* For each giv not marked as maybe dead that has been combined with a
5254 second giv, clear any "maybe dead" mark on that second giv.
5255 v->new_reg will either be or refer to the register of the giv it
5256 combined with.
5257
5258 Doing this clearing avoids problems in biv elimination where
5259 a giv's new_reg is a complex value that can't be put in the
5260 insn but the giv combined with (with a reg as new_reg) is
5261 marked maybe_dead. Since the register will be used in either
5262 case, we'd prefer it be used from the simpler giv. */
5263
5264 for (v = bl->giv; v; v = v->next_iv)
5265 if (! v->maybe_dead && v->same)
5266 v->same->maybe_dead = 0;
5267
5268 /* Try to eliminate the biv, if it is a candidate.
5269 This won't work if ! bl->all_reduced,
5270 since the givs we planned to use might not have been reduced.
5271
5272 We have to be careful that we didn't initially think we could
5273 eliminate this biv because of a giv that we now think may be
5274 dead and shouldn't be used as a biv replacement.
5275
5276 Also, there is the possibility that we may have a giv that looks
5277 like it can be used to eliminate a biv, but the resulting insn
5278 isn't valid. This can happen, for example, on the 88k, where a
5279 JUMP_INSN can compare a register only with zero. Attempts to
5280 replace it with a compare with a constant will fail.
5281
5282 Note that in cases where this call fails, we may have replaced some
5283 of the occurrences of the biv with a giv, but no harm was done in
5284 doing so in the rare cases where it can occur. */
5285
5286 if (bl->all_reduced == 1 && bl->eliminable
5287 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5288 {
5289 /* ?? If we created a new test to bypass the loop entirely,
5290 or otherwise drop straight in, based on this test, then
5291 we might want to rewrite it also. This way some later
5292 pass has more hope of removing the initialization of this
5293 biv entirely. */
5294
5295 /* If final_value != 0, then the biv may be used after loop end
5296 and we must emit an insn to set it just in case.
5297
5298 Reversed bivs already have an insn after the loop setting their
5299 value, so we don't need another one. We can't calculate the
5300 proper final value for such a biv here anyway. */
5301 if (bl->final_value && ! bl->reversed)
5302 loop_insn_sink_or_swim (loop,
5303 gen_load_of_final_value (bl->biv->dest_reg,
5304 bl->final_value));
5305
5306 if (loop_dump_stream)
5307 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5308 bl->regno);
5309 }
5310 /* See the above note regarding final_value. But since we couldn't eliminate
5311 the biv, we must set the value after the loop instead of before. */
5312 else if (bl->final_value && ! bl->reversed)
5313 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5314 bl->final_value));
5315 }
5316
5317 /* Go through all the instructions in the loop, making all the
5318 register substitutions scheduled in REG_MAP. */
5319
5320 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5321 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5322 || GET_CODE (p) == CALL_INSN)
5323 {
5324 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5325 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5326 INSN_CODE (p) = -1;
5327 }
5328
5329 if (loop_info->n_iterations > 0)
5330 {
5331 /* When we completely unroll a loop we will likely not need the increment
5332 of the loop BIV and we will not need the conditional branch at the
5333 end of the loop. */
5334 unrolled_insn_copies = insn_count - 2;
5335
5336 #ifdef HAVE_cc0
5337 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5338 need the comparison before the conditional branch at the end of the
5339 loop. */
5340 unrolled_insn_copies -= 1;
5341 #endif
5342
5343 /* We'll need one copy for each loop iteration. */
5344 unrolled_insn_copies *= loop_info->n_iterations;
5345
5346 /* A little slop to account for the ability to remove initialization
5347 code, better CSE, and other secondary benefits of completely
5348 unrolling some loops. */
5349 unrolled_insn_copies -= 1;
5350
5351 /* Clamp the value. */
5352 if (unrolled_insn_copies < 0)
5353 unrolled_insn_copies = 0;
5354 }
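
/* As a worked example of this estimate, with hypothetical numbers: a
loop body of 10 insns iterating 4 times yields (10 - 2) * 4 - 1 = 31
copies, or (10 - 3) * 4 - 1 = 27 on a HAVE_cc0 machine, so such a
loop would not satisfy the unrolled_insn_copies <= insn_count test
below. */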
5355
5356 /* Unroll loops from within strength reduction so that we can use the
5357 induction variable information that strength_reduce has already
5358 collected. Always unroll loops that would be as small or smaller
5359 unrolled than when rolled. */
5360 if ((flags & LOOP_UNROLL)
5361 || ((flags & LOOP_AUTO_UNROLL)
5362 && loop_info->n_iterations > 0
5363 && unrolled_insn_copies <= insn_count))
5364 unroll_loop (loop, insn_count, 1);
5365
5366 if (loop_dump_stream)
5367 fprintf (loop_dump_stream, "\n");
5368
5369 loop_ivs_free (loop);
5370 if (reg_map)
5371 free (reg_map);
5372 }
5373 \f
5374 /* Record all basic induction variables calculated in the insn. */
5375 static rtx
5376 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5377 int maybe_multiple)
5378 {
5379 struct loop_ivs *ivs = LOOP_IVS (loop);
5380 rtx set;
5381 rtx dest_reg;
5382 rtx inc_val;
5383 rtx mult_val;
5384 rtx *location;
5385
5386 if (GET_CODE (p) == INSN
5387 && (set = single_set (p))
5388 && REG_P (SET_DEST (set)))
5389 {
5390 dest_reg = SET_DEST (set);
5391 if (REGNO (dest_reg) < max_reg_before_loop
5392 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5393 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5394 {
5395 if (basic_induction_var (loop, SET_SRC (set),
5396 GET_MODE (SET_SRC (set)),
5397 dest_reg, p, &inc_val, &mult_val,
5398 &location))
5399 {
5400 /* It is a possible basic induction variable.
5401 Create and initialize an induction structure for it. */
5402
5403 struct induction *v = xmalloc (sizeof (struct induction));
5404
5405 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5406 not_every_iteration, maybe_multiple);
5407 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5408 }
5409 else if (REGNO (dest_reg) < ivs->n_regs)
5410 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5411 }
5412 }
5413 return p;
5414 }
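
/* For illustration, a hypothetical source loop such as

for (i = 0; i < n; i++)
...

increments I with an insn whose RTL is roughly

(set (reg i) (plus (reg i) (const_int 1)))

for which the code above calls record_biv with INC_VAL = (const_int 1)
and MULT_VAL = const1_rtx. */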
5415 \f
5416 /* Record all givs calculated in the insn.
5417 A register is a giv if: it is only set once, it is a function of a
5418 biv and a constant (or invariant), and it is not a biv. */
5419 static rtx
5420 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5421 int maybe_multiple)
5422 {
5423 struct loop_regs *regs = LOOP_REGS (loop);
5424
5425 rtx set;
5426 /* Look for a general induction variable in a register. */
5427 if (GET_CODE (p) == INSN
5428 && (set = single_set (p))
5429 && REG_P (SET_DEST (set))
5430 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5431 {
5432 rtx src_reg;
5433 rtx dest_reg;
5434 rtx add_val;
5435 rtx mult_val;
5436 rtx ext_val;
5437 int benefit;
5438 rtx regnote = 0;
5439 rtx last_consec_insn;
5440
5441 dest_reg = SET_DEST (set);
5442 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5443 return p;
5444
5445 if (/* SET_SRC is a giv. */
5446 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5447 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5448 /* Equivalent expression is a giv. */
5449 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5450 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5451 &add_val, &mult_val, &ext_val, 0,
5452 &benefit, VOIDmode)))
5453 /* Don't try to handle any regs made by loop optimization.
5454 We have nothing on them in regno_first_uid, etc. */
5455 && REGNO (dest_reg) < max_reg_before_loop
5456 /* Don't recognize a BASIC_INDUCT_VAR here. */
5457 && dest_reg != src_reg
5458 /* This must be the only place where the register is set. */
5459 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5460 /* or all sets must be consecutive and make a giv. */
5461 || (benefit = consec_sets_giv (loop, benefit, p,
5462 src_reg, dest_reg,
5463 &add_val, &mult_val, &ext_val,
5464 &last_consec_insn))))
5465 {
5466 struct induction *v = xmalloc (sizeof (struct induction));
5467
5468 /* If this is a library call, increase benefit. */
5469 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5470 benefit += libcall_benefit (p);
5471
5472 /* Skip the consecutive insns, if there are any. */
5473 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5474 p = last_consec_insn;
5475
5476 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5477 ext_val, benefit, DEST_REG, not_every_iteration,
5478 maybe_multiple, (rtx*) 0);
5479
5480 }
5481 }
5482
5483 /* Look for givs which are memory addresses. */
5484 if (GET_CODE (p) == INSN)
5485 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5486 maybe_multiple);
5487
5488 /* Update the status of whether giv can derive other givs. This can
5489 change when we pass a label or an insn that updates a biv. */
5490 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5491 || GET_CODE (p) == CODE_LABEL)
5492 update_giv_derive (loop, p);
5493 return p;
5494 }
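
/* For illustration, with I a biv as in the hypothetical example above,
a statement such as

j = 4 * i + 3;

may appear as

(set (reg j) (plus (mult (reg i) (const_int 4)) (const_int 3)))

and is recorded by record_giv with SRC_REG = (reg i),
MULT_VAL = (const_int 4) and ADD_VAL = (const_int 3). */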
5495 \f
5496 /* Return 1 if X is a valid source for an initial value (or as value being
5497 compared against in an initial test).
5498
5499 X must be either a register or constant and must not be clobbered between
5500 the current insn and the start of the loop.
5501
5502 INSN is the insn containing X. */
5503
5504 static int
5505 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
5506 {
5507 if (CONSTANT_P (x))
5508 return 1;
5509
5510 /* Only consider pseudos we know about, initialized in insns whose luids
5511 we know. */
5512 if (!REG_P (x)
5513 || REGNO (x) >= max_reg_before_loop)
5514 return 0;
5515
5516 /* Don't use a call-clobbered register across a call which clobbers it. On
5517 some machines, don't use any hard registers at all. */
5518 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5519 && (SMALL_REGISTER_CLASSES
5520 || (call_used_regs[REGNO (x)] && call_seen)))
5521 return 0;
5522
5523 /* Don't use registers that have been clobbered before the start of the
5524 loop. */
5525 if (reg_set_between_p (x, insn, loop_start))
5526 return 0;
5527
5528 return 1;
5529 }
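
/* A sketch of the tests above, using hypothetical registers:

(set (reg i) (reg 70)) ; pseudo set before the loop: accepted
(set (reg i) (reg 1)) ; call-used hard reg after a call: rejected

assuming reg 70 is not clobbered again before the loop start. */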
5530 \f
5531 /* Scan X for memory refs and check each memory address
5532 as a possible giv. INSN is the insn whose pattern X comes from.
5533 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5534 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5535 more than once in each loop iteration. */
5536
5537 static void
5538 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5539 int not_every_iteration, int maybe_multiple)
5540 {
5541 int i, j;
5542 enum rtx_code code;
5543 const char *fmt;
5544
5545 if (x == 0)
5546 return;
5547
5548 code = GET_CODE (x);
5549 switch (code)
5550 {
5551 case REG:
5552 case CONST_INT:
5553 case CONST:
5554 case CONST_DOUBLE:
5555 case SYMBOL_REF:
5556 case LABEL_REF:
5557 case PC:
5558 case CC0:
5559 case ADDR_VEC:
5560 case ADDR_DIFF_VEC:
5561 case USE:
5562 case CLOBBER:
5563 return;
5564
5565 case MEM:
5566 {
5567 rtx src_reg;
5568 rtx add_val;
5569 rtx mult_val;
5570 rtx ext_val;
5571 int benefit;
5572
5573 /* This code used to disable creating GIVs with mult_val == 1 and
5574 add_val == 0. However, this leads to lost optimizations when
5575 it comes time to combine a set of related DEST_ADDR GIVs, since
5576 this one would not be seen. */
5577
5578 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5579 &mult_val, &ext_val, 1, &benefit,
5580 GET_MODE (x)))
5581 {
5582 /* Found one; record it. */
5583 struct induction *v = xmalloc (sizeof (struct induction));
5584
5585 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5586 add_val, ext_val, benefit, DEST_ADDR,
5587 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5588
5589 v->mem = x;
5590 }
5591 }
5592 return;
5593
5594 default:
5595 break;
5596 }
5597
5598 /* Recursively scan the subexpressions for other mem refs. */
5599
5600 fmt = GET_RTX_FORMAT (code);
5601 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5602 if (fmt[i] == 'e')
5603 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5604 maybe_multiple);
5605 else if (fmt[i] == 'E')
5606 for (j = 0; j < XVECLEN (x, i); j++)
5607 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5608 maybe_multiple);
5609 }
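
/* A DEST_ADDR giv found here typically comes from an array reference.
For a hypothetical access a[i], with I a biv and 4-byte elements, the
address inside

(mem:SI (plus (reg base) (mult (reg i) (const_int 4))))

is the giv; *LOCATION points at the address within the MEM so that it
can be replaced in place if the giv is reduced. */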
5610 \f
5611 /* Fill in the data about one biv update.
5612 V is the `struct induction' in which we record the biv. (It is
5613 allocated by the caller.)
5614 INSN is the insn that sets it.
5615 DEST_REG is the biv's reg.
5616
5617 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5618 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5619 being set to INC_VAL.
5620
5621 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5622 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5623 can be executed more than once per iteration. If MAYBE_MULTIPLE
5624 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5625 executed exactly once per iteration. */
5626
5627 static void
5628 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5629 rtx inc_val, rtx mult_val, rtx *location,
5630 int not_every_iteration, int maybe_multiple)
5631 {
5632 struct loop_ivs *ivs = LOOP_IVS (loop);
5633 struct iv_class *bl;
5634
5635 v->insn = insn;
5636 v->src_reg = dest_reg;
5637 v->dest_reg = dest_reg;
5638 v->mult_val = mult_val;
5639 v->add_val = inc_val;
5640 v->ext_dependent = NULL_RTX;
5641 v->location = location;
5642 v->mode = GET_MODE (dest_reg);
5643 v->always_computable = ! not_every_iteration;
5644 v->always_executed = ! not_every_iteration;
5645 v->maybe_multiple = maybe_multiple;
5646 v->same = 0;
5647
5648 /* Add this to the reg's iv_class, creating a class
5649 if this is the first incrementation of the reg. */
5650
5651 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5652 if (bl == 0)
5653 {
5654 /* Create and initialize new iv_class. */
5655
5656 bl = xmalloc (sizeof (struct iv_class));
5657
5658 bl->regno = REGNO (dest_reg);
5659 bl->biv = 0;
5660 bl->giv = 0;
5661 bl->biv_count = 0;
5662 bl->giv_count = 0;
5663
5664 /* Set initial value to the reg itself. */
5665 bl->initial_value = dest_reg;
5666 bl->final_value = 0;
5667 /* We haven't seen the initializing insn yet. */
5668 bl->init_insn = 0;
5669 bl->init_set = 0;
5670 bl->initial_test = 0;
5671 bl->incremented = 0;
5672 bl->eliminable = 0;
5673 bl->nonneg = 0;
5674 bl->reversed = 0;
5675 bl->total_benefit = 0;
5676
5677 /* Add this class to ivs->list. */
5678 bl->next = ivs->list;
5679 ivs->list = bl;
5680
5681 /* Put it in the array of biv register classes. */
5682 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5683 }
5684 else
5685 {
5686 /* Check if location is the same as a previous one. */
5687 struct induction *induction;
5688 for (induction = bl->biv; induction; induction = induction->next_iv)
5689 if (location == induction->location)
5690 {
5691 v->same = induction;
5692 break;
5693 }
5694 }
5695
5696 /* Update IV_CLASS entry for this biv. */
5697 v->next_iv = bl->biv;
5698 bl->biv = v;
5699 bl->biv_count++;
5700 if (mult_val == const1_rtx)
5701 bl->incremented = 1;
5702
5703 if (loop_dump_stream)
5704 loop_biv_dump (v, loop_dump_stream, 0);
5705 }
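
/* Note that a single iv_class can collect several biv updates. A
hypothetical loop body containing both

i += 1; ... i += 2;

yields two `struct induction' entries chained through next_iv on the
same iv_class, with biv_count == 2 and a net increment of 3 per
iteration. */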
5706 \f
5707 /* Fill in the data about one giv.
5708 V is the `struct induction' in which we record the giv. (It is
5709 allocated by the caller.)
5710 INSN is the insn that sets it.
5711 BENEFIT estimates the savings from deleting this insn.
5712 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5713 into a register or is used as a memory address.
5714
5715 SRC_REG is the biv reg which the giv is computed from.
5716 DEST_REG is the giv's reg (if the giv is stored in a reg).
5717 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5718 LOCATION points to the place where this giv's value appears in INSN. */
5719
5720 static void
5721 record_giv (const struct loop *loop, struct induction *v, rtx insn,
5722 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5723 rtx ext_val, int benefit, enum g_types type,
5724 int not_every_iteration, int maybe_multiple, rtx *location)
5725 {
5726 struct loop_ivs *ivs = LOOP_IVS (loop);
5727 struct induction *b;
5728 struct iv_class *bl;
5729 rtx set = single_set (insn);
5730 rtx temp;
5731
5732 /* Attempt to prove that the values are constant. Don't let simplify_rtx
5733 undo the MULT canonicalization that we performed earlier. */
5734 temp = simplify_rtx (add_val);
5735 if (temp
5736 && ! (GET_CODE (add_val) == MULT
5737 && GET_CODE (temp) == ASHIFT))
5738 add_val = temp;
5739
5740 v->insn = insn;
5741 v->src_reg = src_reg;
5742 v->giv_type = type;
5743 v->dest_reg = dest_reg;
5744 v->mult_val = mult_val;
5745 v->add_val = add_val;
5746 v->ext_dependent = ext_val;
5747 v->benefit = benefit;
5748 v->location = location;
5749 v->cant_derive = 0;
5750 v->combined_with = 0;
5751 v->maybe_multiple = maybe_multiple;
5752 v->maybe_dead = 0;
5753 v->derive_adjustment = 0;
5754 v->same = 0;
5755 v->ignore = 0;
5756 v->new_reg = 0;
5757 v->final_value = 0;
5758 v->same_insn = 0;
5759 v->auto_inc_opt = 0;
5760 v->unrolled = 0;
5761 v->shared = 0;
5762
5763 /* The v->always_computable field is used in update_giv_derive, to
5764 determine whether a giv can be used to derive another giv. For a
5765 DEST_REG giv, INSN computes a new value for the giv, so its value
5766 isn't computable if INSN isn't executed every iteration.
5767 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5768 it does not compute a new value. Hence the value is always computable
5769 regardless of whether INSN is executed each iteration. */
5770
5771 if (type == DEST_ADDR)
5772 v->always_computable = 1;
5773 else
5774 v->always_computable = ! not_every_iteration;
5775
5776 v->always_executed = ! not_every_iteration;
5777
5778 if (type == DEST_ADDR)
5779 {
5780 v->mode = GET_MODE (*location);
5781 v->lifetime = 1;
5782 }
5783 else /* type == DEST_REG */
5784 {
5785 v->mode = GET_MODE (SET_DEST (set));
5786
5787 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5788
5789 /* If the lifetime is zero, it means that this register is
5790 really a dead store. So mark this as a giv that can be
5791 ignored. This will not prevent the biv from being eliminated. */
5792 if (v->lifetime == 0)
5793 v->ignore = 1;
5794
5795 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5796 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5797 }
5798
5799 /* Add the giv to the class of givs computed from one biv. */
5800
5801 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5802 if (bl)
5803 {
5804 v->next_iv = bl->giv;
5805 bl->giv = v;
5806 /* Don't count DEST_ADDR. This is supposed to count the number of
5807 insns that calculate givs. */
5808 if (type == DEST_REG)
5809 bl->giv_count++;
5810 bl->total_benefit += benefit;
5811 }
5812 else
5813 /* Fatal error, biv missing for this giv? */
5814 abort ();
5815
5816 if (type == DEST_ADDR)
5817 {
5818 v->replaceable = 1;
5819 v->not_replaceable = 0;
5820 }
5821 else
5822 {
5823 /* The giv can be replaced outright by the reduced register only if all
5824 of the following conditions are true:
5825 - the insn that sets the giv is always executed on any iteration
5826 on which the giv is used at all
5827 (there are two ways to deduce this:
5828 either the insn is executed on every iteration,
5829 or all uses follow that insn in the same basic block),
5830 - the giv is not used outside the loop
5831 - no assignments to the biv occur during the giv's lifetime. */
5832
5833 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5834 /* Previous line always fails if INSN was moved by loop opt. */
5835 && REGNO_LAST_LUID (REGNO (dest_reg))
5836 < INSN_LUID (loop->end)
5837 && (! not_every_iteration
5838 || last_use_this_basic_block (dest_reg, insn)))
5839 {
5840 /* Now check that there are no assignments to the biv within the
5841 giv's lifetime. This requires two separate checks. */
5842
5843 /* Check each biv update, and fail if any are between the first
5844 and last use of the giv.
5845
5846 If this loop contains an inner loop that was unrolled, then
5847 the insn modifying the biv may have been emitted by the loop
5848 unrolling code, and hence does not have a valid luid. Just
5849 mark the biv as not replaceable in this case. It is not very
5850 useful as a biv, because it is used in two different loops.
5851 It is very unlikely that we would be able to optimize the giv
5852 using this biv anyway. */
5853
5854 v->replaceable = 1;
5855 v->not_replaceable = 0;
5856 for (b = bl->biv; b; b = b->next_iv)
5857 {
5858 if (INSN_UID (b->insn) >= max_uid_for_loop
5859 || ((INSN_LUID (b->insn)
5860 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5861 && (INSN_LUID (b->insn)
5862 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5863 {
5864 v->replaceable = 0;
5865 v->not_replaceable = 1;
5866 break;
5867 }
5868 }
5869
5870 /* If there are any backwards branches that go from after the
5871 biv update to before it, then this giv is not replaceable. */
5872 if (v->replaceable)
5873 for (b = bl->biv; b; b = b->next_iv)
5874 if (back_branch_in_range_p (loop, b->insn))
5875 {
5876 v->replaceable = 0;
5877 v->not_replaceable = 1;
5878 break;
5879 }
5880 }
5881 else
5882 {
5883 /* May still be replaceable, we don't have enough info here to
5884 decide. */
5885 v->replaceable = 0;
5886 v->not_replaceable = 0;
5887 }
5888 }
5889
5890 /* Record whether the add_val contains a const_int, for later use by
5891 combine_givs. */
5892 {
5893 rtx tem = add_val;
5894
5895 v->no_const_addval = 1;
5896 if (tem == const0_rtx)
5897 ;
5898 else if (CONSTANT_P (add_val))
5899 v->no_const_addval = 0;
5900 if (GET_CODE (tem) == PLUS)
5901 {
5902 while (1)
5903 {
5904 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5905 tem = XEXP (tem, 0);
5906 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5907 tem = XEXP (tem, 1);
5908 else
5909 break;
5910 }
5911 if (CONSTANT_P (XEXP (tem, 1)))
5912 v->no_const_addval = 0;
5913 }
5914 }
5915
5916 if (loop_dump_stream)
5917 loop_giv_dump (v, loop_dump_stream, 0);
5918 }
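
/* A sketch of the replaceability test above: in a hypothetical body

j = i * 4; ... use j ... i++; ... use j ...

the biv update falls between the giv's first and last use, so the giv
is marked not_replaceable; the reduced register is stepped where the
biv is, and the final use would otherwise see the incremented value. */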
5919
5920 /* All this does is determine whether a giv can be made replaceable because
5921 its final value can be calculated. This code cannot be part of record_giv
5922 above, because final_giv_value requires that the number of loop iterations
5923 be known, and that cannot be accurately calculated until after all givs
5924 have been identified. */
5925
5926 static void
5927 check_final_value (const struct loop *loop, struct induction *v)
5928 {
5929 rtx final_value = 0;
5930
5931 /* DEST_ADDR givs will never reach here, because they are always marked
5932 replaceable above in record_giv. */
5933
5934 /* The giv can be replaced outright by the reduced register only if all
5935 of the following conditions are true:
5936 - the insn that sets the giv is always executed on any iteration
5937 on which the giv is used at all
5938 (there are two ways to deduce this:
5939 either the insn is executed on every iteration,
5940 or all uses follow that insn in the same basic block),
5941 - its final value can be calculated (this condition is different
5942 from the one above in record_giv)
5943 - it is not used before it is set
5944 - no assignments to the biv occur during the giv's lifetime. */
5945
5946 #if 0
5947 /* This is only called now when replaceable is known to be false. */
5948 /* Clear replaceable, so that it won't confuse final_giv_value. */
5949 v->replaceable = 0;
5950 #endif
5951
5952 if ((final_value = final_giv_value (loop, v))
5953 && (v->always_executed
5954 || last_use_this_basic_block (v->dest_reg, v->insn)))
5955 {
5956 int biv_increment_seen = 0, before_giv_insn = 0;
5957 rtx p = v->insn;
5958 rtx last_giv_use;
5959
5960 v->replaceable = 1;
5961 v->not_replaceable = 0;
5962
5963 /* When trying to determine whether or not a biv increment occurs
5964 during the lifetime of the giv, we can ignore uses of the variable
5965 outside the loop because final_value is nonzero. Hence we cannot
5966 use regno_last_uid and regno_first_uid as above in record_giv. */
5967
5968 /* Search the loop to determine whether any assignments to the
5969 biv occur during the giv's lifetime. Start with the insn
5970 that sets the giv, and search around the loop until we come
5971 back to that insn again.
5972
5973 Also fail if there is a jump within the giv's lifetime that jumps
5974 to somewhere outside the lifetime but still within the loop. This
5975 catches spaghetti code where the execution order is not linear, and
5976 hence the above test fails. Here we assume that the giv lifetime
5977 does not extend from one iteration of the loop to the next, so as
5978 to make the test easier. Since the lifetime isn't known yet,
5979 this requires two loops. See also record_giv above. */
5980
5981 last_giv_use = v->insn;
5982
5983 while (1)
5984 {
5985 p = NEXT_INSN (p);
5986 if (p == loop->end)
5987 {
5988 before_giv_insn = 1;
5989 p = NEXT_INSN (loop->start);
5990 }
5991 if (p == v->insn)
5992 break;
5993
5994 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5995 || GET_CODE (p) == CALL_INSN)
5996 {
5997 /* It is possible for the BIV increment to use the GIV if we
5998 have a cycle. Thus we must be sure to check each insn for
5999 both BIV and GIV uses, and we must check for BIV uses
6000 first. */
6001
6002 if (! biv_increment_seen
6003 && reg_set_p (v->src_reg, PATTERN (p)))
6004 biv_increment_seen = 1;
6005
6006 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6007 {
6008 if (biv_increment_seen || before_giv_insn)
6009 {
6010 v->replaceable = 0;
6011 v->not_replaceable = 1;
6012 break;
6013 }
6014 last_giv_use = p;
6015 }
6016 }
6017 }
6018
6019 /* Now that the lifetime of the giv is known, check for branches
6020 from within the lifetime to outside the lifetime if it is still
6021 replaceable. */
6022
6023 if (v->replaceable)
6024 {
6025 p = v->insn;
6026 while (1)
6027 {
6028 p = NEXT_INSN (p);
6029 if (p == loop->end)
6030 p = NEXT_INSN (loop->start);
6031 if (p == last_giv_use)
6032 break;
6033
6034 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6035 && LABEL_NAME (JUMP_LABEL (p))
6036 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6037 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6038 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6039 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6040 {
6041 v->replaceable = 0;
6042 v->not_replaceable = 1;
6043
6044 if (loop_dump_stream)
6045 fprintf (loop_dump_stream,
6046 "Found branch outside giv lifetime.\n");
6047
6048 break;
6049 }
6050 }
6051 }
6052
6053 /* If it is replaceable, then save the final value. */
6054 if (v->replaceable)
6055 v->final_value = final_value;
6056 }
6057
6058 if (loop_dump_stream && v->replaceable)
6059 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6060 INSN_UID (v->insn), REGNO (v->dest_reg));
6061 }
6062 \f
6063 /* Update the status of whether a giv can derive other givs.
6064
6065 We need to do something special if there is or may be an update to the biv
6066 between the time the giv is defined and the time it is used to derive
6067 another giv.
6068
6069 In addition, a giv that is only conditionally set is not allowed to
6070 derive another giv once a label has been passed.
6071
6072 The cases we look at are when a label or an update to a biv is passed. */
6073
6074 static void
6075 update_giv_derive (const struct loop *loop, rtx p)
6076 {
6077 struct loop_ivs *ivs = LOOP_IVS (loop);
6078 struct iv_class *bl;
6079 struct induction *biv, *giv;
6080 rtx tem;
6081 int dummy;
6082
6083 /* Search all IV classes, then all bivs, and finally all givs.
6084
6085 There are three cases we are concerned with. First we have the situation
6086 of a giv that is only updated conditionally. In that case, it may not
6087 derive any givs after a label is passed.
6088
6089 The second case is when a biv update occurs, or may occur, after the
6090 definition of a giv. For certain biv updates (see below) that are
6091 known to occur between the giv definition and use, we can adjust the
6092 giv definition. For others, or when the biv update is conditional,
6093 we must prevent the giv from deriving any other givs. There are two
6094 sub-cases within this case.
6095
6096 If this is a label, we are concerned with any biv update that is done
6097 conditionally, since it may be done after the giv is defined followed by
6098 a branch here (actually, we need to pass both a jump and a label, but
6099 this extra tracking doesn't seem worth it).
6100
6101 If this is a jump, we are concerned about any biv update that may be
6102 executed multiple times. We are actually only concerned about
6103 backward jumps, but it is probably not worth performing the test
6104 on the jump again here.
6105
6106 If this is a biv update, we must adjust the giv status to show that a
6107 subsequent biv update was performed. If this adjustment cannot be done,
6108 the giv cannot derive further givs. */
6109
6110 for (bl = ivs->list; bl; bl = bl->next)
6111 for (biv = bl->biv; biv; biv = biv->next_iv)
6112 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6113 || biv->insn == p)
6114 {
6115 /* Skip if location is the same as a previous one. */
6116 if (biv->same)
6117 continue;
6118
6119 for (giv = bl->giv; giv; giv = giv->next_iv)
6120 {
6121 /* If cant_derive is already true, there is no point in
6122 checking all of these conditions again. */
6123 if (giv->cant_derive)
6124 continue;
6125
6126 /* If this giv is conditionally set and we have passed a label,
6127 it cannot derive anything. */
6128 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6129 giv->cant_derive = 1;
6130
6131 /* Skip givs that have mult_val == 0, since
6132 they are really invariants. Also skip those that are
6133 replaceable, since we know their lifetime doesn't contain
6134 any biv update. */
6135 else if (giv->mult_val == const0_rtx || giv->replaceable)
6136 continue;
6137
6138 /* The only way we can allow this giv to derive another
6139 is if this is a biv increment and we can form the product
6140 of biv->add_val and giv->mult_val. In this case, we will
6141 be able to compute a compensation. */
6142 else if (biv->insn == p)
6143 {
6144 rtx ext_val_dummy;
6145
6146 tem = 0;
6147 if (biv->mult_val == const1_rtx)
6148 tem = simplify_giv_expr (loop,
6149 gen_rtx_MULT (giv->mode,
6150 biv->add_val,
6151 giv->mult_val),
6152 &ext_val_dummy, &dummy);
6153
6154 if (tem && giv->derive_adjustment)
6155 tem = simplify_giv_expr
6156 (loop,
6157 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6158 &ext_val_dummy, &dummy);
6159
6160 if (tem)
6161 giv->derive_adjustment = tem;
6162 else
6163 giv->cant_derive = 1;
6164 }
6165 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6166 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6167 giv->cant_derive = 1;
6168 }
6169 }
6170 }
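
/* For instance, with a hypothetical biv B incremented by 4 and a giv
G = B * 2 defined before that increment, a giv computed from G after
the increment is off by 4 * 2 = 8; derive_adjustment records exactly
this product of biv->add_val and giv->mult_val so that the
compensation can be applied. */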
6171 \f
6172 /* Check whether an insn is an increment legitimate for a basic induction var.
6173 X is the source of insn P, or a part of it.
6174 MODE is the mode in which X should be interpreted.
6175
6176 DEST_REG is the putative biv, also the destination of the insn.
6177 We accept patterns of these forms:
6178 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6179 REG = INVARIANT + REG
6180
6181 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6182 store the additive term into *INC_VAL, and store the place where
6183 we found the additive term into *LOCATION.
6184
6185 If X is an assignment of an invariant into DEST_REG, we set
6186 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6187
6188 We also want to detect a BIV when it corresponds to a variable
6189 whose mode was promoted. In that case, an increment
6190 of the variable may be a PLUS that adds a SUBREG of that variable to
6191 an invariant and then sign- or zero-extends the result of the PLUS
6192 into the variable.
6193
6194 Most GIVs in such cases will be in the promoted mode, since that is
6195 probably the natural computation mode (and almost certainly the mode
6196 used for addresses) on the machine. So we view the pseudo-reg containing
6197 the variable as the BIV, as if it were simply incremented.
6198
6199 Note that treating the entire pseudo as a BIV will result in making
6200 simple increments to any GIVs based on it. However, if the variable
6201 overflows in its declared mode but not its promoted mode, the result will
6202 be incorrect. This is acceptable if the variable is signed, since
6203 overflows in such cases are undefined, but not if it is unsigned, since
6204 those overflows are defined. So we only check for SIGN_EXTEND and
6205 not ZERO_EXTEND.
6206
6207 If we cannot find a biv, we return 0. */
6208
6209 static int
6210 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6211 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6212 rtx **location)
6213 {
6214 enum rtx_code code;
6215 rtx *argp, arg;
6216 rtx insn, set = 0, last, inc;
6217
6218 code = GET_CODE (x);
6219 *location = NULL;
6220 switch (code)
6221 {
6222 case PLUS:
6223 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6224 || (GET_CODE (XEXP (x, 0)) == SUBREG
6225 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6226 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6227 {
6228 argp = &XEXP (x, 1);
6229 }
6230 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6231 || (GET_CODE (XEXP (x, 1)) == SUBREG
6232 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6233 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6234 {
6235 argp = &XEXP (x, 0);
6236 }
6237 else
6238 return 0;
6239
6240 arg = *argp;
6241 if (loop_invariant_p (loop, arg) != 1)
6242 return 0;
6243
6244 /* convert_modes can emit new instructions, e.g. when arg is a loop
6245 invariant MEM and dest_reg has a different mode.
6246 These instructions would be emitted after the end of the function
6247 and then *inc_val would be an uninitialized pseudo.
6248 Detect this and bail in this case.
6249 Other alternatives to solve this can be introducing a convert_modes
6250 variant which is allowed to fail but not allowed to emit new
6251 instructions, emit these instructions before loop start and let
6252 it be garbage collected if *inc_val is never used or saving the
6253 *inc_val initialization sequence generated here and when *inc_val
6254 is going to be actually used, emit it at some suitable place. */
6255 last = get_last_insn ();
6256 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6257 if (get_last_insn () != last)
6258 {
6259 delete_insns_since (last);
6260 return 0;
6261 }
6262
6263 *inc_val = inc;
6264 *mult_val = const1_rtx;
6265 *location = argp;
6266 return 1;
6267
6268 case SUBREG:
6269 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This
6270 will handle addition of promoted variables.
6271 ??? The comment at the start of this function is wrong: promoted
6272 variable increments don't look like it says they do. */
6273 return basic_induction_var (loop, SUBREG_REG (x),
6274 GET_MODE (SUBREG_REG (x)),
6275 dest_reg, p, inc_val, mult_val, location);
6276
6277 case REG:
6278 /* If this register is assigned in a previous insn, look at its
6279 source, but don't go outside the loop or past a label. */
6280
6281 /* If this sets a register to itself, we would repeat any previous
6282 biv increment if we applied this strategy blindly. */
6283 if (rtx_equal_p (dest_reg, x))
6284 return 0;
6285
6286 insn = p;
6287 while (1)
6288 {
6289 rtx dest;
6290 do
6291 {
6292 insn = PREV_INSN (insn);
6293 }
6294 while (insn && GET_CODE (insn) == NOTE
6295 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6296
6297 if (!insn)
6298 break;
6299 set = single_set (insn);
6300 if (set == 0)
6301 break;
6302 dest = SET_DEST (set);
6303 if (dest == x
6304 || (GET_CODE (dest) == SUBREG
6305 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6306 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6307 && SUBREG_REG (dest) == x))
6308 return basic_induction_var (loop, SET_SRC (set),
6309 (GET_MODE (SET_SRC (set)) == VOIDmode
6310 ? GET_MODE (x)
6311 : GET_MODE (SET_SRC (set))),
6312 dest_reg, insn,
6313 inc_val, mult_val, location);
6314
6315 while (GET_CODE (dest) == SIGN_EXTRACT
6316 || GET_CODE (dest) == ZERO_EXTRACT
6317 || GET_CODE (dest) == SUBREG
6318 || GET_CODE (dest) == STRICT_LOW_PART)
6319 dest = XEXP (dest, 0);
6320 if (dest == x)
6321 break;
6322 }
6323 /* Fall through. */
6324
6325 /* Can accept constant setting of biv only when inside the innermost loop.
6326 Otherwise, a biv of an inner loop may be incorrectly recognized
6327 as a biv of the outer loop,
6328 causing code to be moved INTO the inner loop. */
6329 case MEM:
6330 if (loop_invariant_p (loop, x) != 1)
6331 return 0;
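/* Fall through. */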
6332 case CONST_INT:
6333 case SYMBOL_REF:
6334 case CONST:
6335 /* convert_modes aborts if we try to convert to or from CCmode, so just
6336 exclude that case. It is very unlikely that a condition code value
6337 would be a useful iterator anyway. convert_modes also aborts if we
6338 try to convert a float mode to a non-float mode or vice versa. */
6339 if (loop->level == 1
6340 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6341 && GET_MODE_CLASS (mode) != MODE_CC)
6342 {
6343 /* Possible bug here? Perhaps we don't know the mode of X. */
6344 last = get_last_insn ();
6345 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6346 if (get_last_insn () != last)
6347 {
6348 delete_insns_since (last);
6349 return 0;
6350 }
6351
6352 *inc_val = inc;
6353 *mult_val = const0_rtx;
6354 return 1;
6355 }
6356 else
6357 return 0;
6358
6359 case SIGN_EXTEND:
6360 /* Ignore this BIV if signed arithmetic overflow is defined. */
6361 if (flag_wrapv)
6362 return 0;
6363 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6364 dest_reg, p, inc_val, mult_val, location);
6365
6366 case ASHIFTRT:
6367 /* Similar, since this can be a sign extension. */
6368 for (insn = PREV_INSN (p);
6369 (insn && GET_CODE (insn) == NOTE
6370 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6371 insn = PREV_INSN (insn))
6372 ;
6373
6374 if (insn)
6375 set = single_set (insn);
6376
6377 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6378 && set && SET_DEST (set) == XEXP (x, 0)
6379 && GET_CODE (XEXP (x, 1)) == CONST_INT
6380 && INTVAL (XEXP (x, 1)) >= 0
6381 && GET_CODE (SET_SRC (set)) == ASHIFT
6382 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6383 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6384 GET_MODE (XEXP (x, 0)),
6385 dest_reg, insn, inc_val, mult_val,
6386 location);
6387 return 0;
6388
6389 default:
6390 return 0;
6391 }
6392 }
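
/* In summary, the two simple shapes accepted above are, schematically,

(set (reg i) (plus (reg i) (const_int 4)))
=> *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4)

(set (reg i) (reg k)) ; K loop-invariant, innermost loop only
=> *MULT_VAL = const0_rtx, *INC_VAL = (reg k)

with the SUBREG, REG, SIGN_EXTEND and ASHIFTRT cases reducing to these
by tracing back through earlier sets. */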
6393 \f
6394 /* A general induction variable (giv) is any quantity that is a linear
6395 function of a basic induction variable,
6396 i.e. giv = biv * mult_val + add_val.
6397 The coefficients can be any loop invariant quantity.
6398 A giv need not be computed directly from the biv;
6399 it can be computed by way of other givs. */
6400
6401 /* Determine whether X computes a giv.
6402 If it does, return a nonzero value
6403 which is the benefit from eliminating the computation of X;
6404 set *SRC_REG to the register of the biv that it is computed from;
6405 set *ADD_VAL and *MULT_VAL to the coefficients,
6406 such that the value of X is biv * mult + add. */
6407
6408 static int
6409 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6410 rtx *add_val, rtx *mult_val, rtx *ext_val,
6411 int is_addr, int *pbenefit,
6412 enum machine_mode addr_mode)
6413 {
6414 struct loop_ivs *ivs = LOOP_IVS (loop);
6415 rtx orig_x = x;
6416
6417 /* If this is an invariant, forget it, it isn't a giv. */
6418 if (loop_invariant_p (loop, x) == 1)
6419 return 0;
6420
6421 *pbenefit = 0;
6422 *ext_val = NULL_RTX;
6423 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6424 if (x == 0)
6425 return 0;
6426
6427 switch (GET_CODE (x))
6428 {
6429 case USE:
6430 case CONST_INT:
6431 /* Since this is now an invariant and wasn't before, it must be a giv
6432 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6433 with. */
6434 *src_reg = ivs->list->biv->dest_reg;
6435 *mult_val = const0_rtx;
6436 *add_val = x;
6437 break;
6438
6439 case REG:
6440 /* This is equivalent to a BIV. */
6441 *src_reg = x;
6442 *mult_val = const1_rtx;
6443 *add_val = const0_rtx;
6444 break;
6445
6446 case PLUS:
6447 /* Either (plus (biv) (invar)) or
6448 (plus (mult (biv) (invar_1)) (invar_2)). */
6449 if (GET_CODE (XEXP (x, 0)) == MULT)
6450 {
6451 *src_reg = XEXP (XEXP (x, 0), 0);
6452 *mult_val = XEXP (XEXP (x, 0), 1);
6453 }
6454 else
6455 {
6456 *src_reg = XEXP (x, 0);
6457 *mult_val = const1_rtx;
6458 }
6459 *add_val = XEXP (x, 1);
6460 break;
6461
6462 case MULT:
6463 /* ADD_VAL is zero. */
6464 *src_reg = XEXP (x, 0);
6465 *mult_val = XEXP (x, 1);
6466 *add_val = const0_rtx;
6467 break;
6468
6469 default:
6470 abort ();
6471 }
6472
6473 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6474 one unless they are CONST_INT). */
6475 if (GET_CODE (*add_val) == USE)
6476 *add_val = XEXP (*add_val, 0);
6477 if (GET_CODE (*mult_val) == USE)
6478 *mult_val = XEXP (*mult_val, 0);
6479
6480 if (is_addr)
6481 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6482 else
6483 *pbenefit += rtx_cost (orig_x, SET);
6484
6485 /* Always return true if this is a giv so it will be detected as such,
6486 even if the benefit is zero or negative. This allows elimination
6487 of bivs that might otherwise not be eliminated. */
6488 return 1;
6489 }
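
/* E.g. for X = (plus (mult (reg b) (const_int 4)) (const_int 3)), with
(reg b) a hypothetical biv, this returns 1 with *SRC_REG = (reg b),
*MULT_VAL = (const_int 4) and *ADD_VAL = (const_int 3); *PBENEFIT is
increased by the cost of computing X directly (rtx_cost, or the
address_cost difference for addresses). */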
6490 \f
6491 /* Given an expression, X, try to form it as a linear function of a biv.
6492 We will canonicalize it to be of the form
6493 (plus (mult (BIV) (invar_1))
6494 (invar_2))
6495 with possible degeneracies.
6496
6497 The invariant expressions must each be of a form that can be used as a
6498 machine operand. We surround them with a USE rtx (a hack, but localized
6499 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6500 routine; it is the caller's responsibility to strip them.
6501
6502 If no such canonicalization is possible (i.e., two biv's are used or an
6503 expression that is neither invariant nor a biv or giv), this routine
6504 returns 0.
6505
6506 For a nonzero return, the result will have a code of CONST_INT, USE,
6507 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6508
6509 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
6510
6511 static rtx sge_plus (enum machine_mode, rtx, rtx);
6512 static rtx sge_plus_constant (rtx, rtx);
6513
6514 static rtx
6515 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6516 {
6517 struct loop_ivs *ivs = LOOP_IVS (loop);
6518 struct loop_regs *regs = LOOP_REGS (loop);
6519 enum machine_mode mode = GET_MODE (x);
6520 rtx arg0, arg1;
6521 rtx tem;
6522
6523 /* If this is not an integer mode, or if we cannot do arithmetic in this
6524 mode, this can't be a giv. */
6525 if (mode != VOIDmode
6526 && (GET_MODE_CLASS (mode) != MODE_INT
6527 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6528 return NULL_RTX;
6529
6530 switch (GET_CODE (x))
6531 {
6532 case PLUS:
6533 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6534 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6535 if (arg0 == 0 || arg1 == 0)
6536 return NULL_RTX;
6537
6538 /* Put constant last, CONST_INT last if both constant. */
6539 if ((GET_CODE (arg0) == USE
6540 || GET_CODE (arg0) == CONST_INT)
6541 && ! ((GET_CODE (arg0) == USE
6542 && GET_CODE (arg1) == USE)
6543 || GET_CODE (arg1) == CONST_INT))
6544 tem = arg0, arg0 = arg1, arg1 = tem;
6545
6546 /* Handle addition of zero, then addition of an invariant. */
6547 if (arg1 == const0_rtx)
6548 return arg0;
6549 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6550 switch (GET_CODE (arg0))
6551 {
6552 case CONST_INT:
6553 case USE:
6554 /* Adding two invariants must result in an invariant, so enclose
6555 the addition operation inside a USE and return it. */
6556 if (GET_CODE (arg0) == USE)
6557 arg0 = XEXP (arg0, 0);
6558 if (GET_CODE (arg1) == USE)
6559 arg1 = XEXP (arg1, 0);
6560
6561 if (GET_CODE (arg0) == CONST_INT)
6562 tem = arg0, arg0 = arg1, arg1 = tem;
6563 if (GET_CODE (arg1) == CONST_INT)
6564 tem = sge_plus_constant (arg0, arg1);
6565 else
6566 tem = sge_plus (mode, arg0, arg1);
6567
6568 if (GET_CODE (tem) != CONST_INT)
6569 tem = gen_rtx_USE (mode, tem);
6570 return tem;
6571
6572 case REG:
6573 case MULT:
6574 /* biv + invar or mult + invar. Return sum. */
6575 return gen_rtx_PLUS (mode, arg0, arg1);
6576
6577 case PLUS:
6578 /* (a + invar_1) + invar_2. Associate. */
6579 return
6580 simplify_giv_expr (loop,
6581 gen_rtx_PLUS (mode,
6582 XEXP (arg0, 0),
6583 gen_rtx_PLUS (mode,
6584 XEXP (arg0, 1),
6585 arg1)),
6586 ext_val, benefit);
6587
6588 default:
6589 abort ();
6590 }
6591
6592 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6593 MULT to reduce cases. */
6594 if (REG_P (arg0))
6595 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6596 if (REG_P (arg1))
6597 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6598
6599 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6600 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6601 Recurse to associate the second PLUS. */
6602 if (GET_CODE (arg1) == MULT)
6603 tem = arg0, arg0 = arg1, arg1 = tem;
6604
6605 if (GET_CODE (arg1) == PLUS)
6606 return
6607 simplify_giv_expr (loop,
6608 gen_rtx_PLUS (mode,
6609 gen_rtx_PLUS (mode, arg0,
6610 XEXP (arg1, 0)),
6611 XEXP (arg1, 1)),
6612 ext_val, benefit);
6613
6614 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6615 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6616 return NULL_RTX;
6617
6618 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6619 return NULL_RTX;
6620
6621 return simplify_giv_expr (loop,
6622 gen_rtx_MULT (mode,
6623 XEXP (arg0, 0),
6624 gen_rtx_PLUS (mode,
6625 XEXP (arg0, 1),
6626 XEXP (arg1, 1))),
6627 ext_val, benefit);
6628
6629 case MINUS:
6630 /* Handle "a - b" as "a + b * (-1)". */
6631 return simplify_giv_expr (loop,
6632 gen_rtx_PLUS (mode,
6633 XEXP (x, 0),
6634 gen_rtx_MULT (mode,
6635 XEXP (x, 1),
6636 constm1_rtx)),
6637 ext_val, benefit);
6638
6639 case MULT:
6640 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6641 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6642 if (arg0 == 0 || arg1 == 0)
6643 return NULL_RTX;
6644
6645 /* Put constant last, CONST_INT last if both constant. */
6646 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6647 && GET_CODE (arg1) != CONST_INT)
6648 tem = arg0, arg0 = arg1, arg1 = tem;
6649
6650 /* If second argument is not now constant, not giv. */
6651 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6652 return NULL_RTX;
6653
6654 /* Handle multiply by 0 or 1. */
6655 if (arg1 == const0_rtx)
6656 return const0_rtx;
6657
6658 else if (arg1 == const1_rtx)
6659 return arg0;
6660
6661 switch (GET_CODE (arg0))
6662 {
6663 case REG:
6664 /* biv * invar. Done. */
6665 return gen_rtx_MULT (mode, arg0, arg1);
6666
6667 case CONST_INT:
6668 /* Product of two constants. */
6669 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6670
6671 case USE:
6672 /* invar * invar is a giv, but attempt to simplify it somehow. */
6673 if (GET_CODE (arg1) != CONST_INT)
6674 return NULL_RTX;
6675
6676 arg0 = XEXP (arg0, 0);
6677 if (GET_CODE (arg0) == MULT)
6678 {
6679 /* (invar_0 * invar_1) * invar_2. Associate. */
6680 return simplify_giv_expr (loop,
6681 gen_rtx_MULT (mode,
6682 XEXP (arg0, 0),
6683 gen_rtx_MULT (mode,
6684 XEXP (arg0,
6685 1),
6686 arg1)),
6687 ext_val, benefit);
6688 }
6689 /* Propagate the MULT expressions to the innermost nodes. */
6690 else if (GET_CODE (arg0) == PLUS)
6691 {
6692 /* (invar_0 + invar_1) * invar_2. Distribute. */
6693 return simplify_giv_expr (loop,
6694 gen_rtx_PLUS (mode,
6695 gen_rtx_MULT (mode,
6696 XEXP (arg0,
6697 0),
6698 arg1),
6699 gen_rtx_MULT (mode,
6700 XEXP (arg0,
6701 1),
6702 arg1)),
6703 ext_val, benefit);
6704 }
6705 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6706
6707 case MULT:
6708 /* (a * invar_1) * invar_2. Associate. */
6709 return simplify_giv_expr (loop,
6710 gen_rtx_MULT (mode,
6711 XEXP (arg0, 0),
6712 gen_rtx_MULT (mode,
6713 XEXP (arg0, 1),
6714 arg1)),
6715 ext_val, benefit);
6716
6717 case PLUS:
6718 /* (a + invar_1) * invar_2. Distribute. */
6719 return simplify_giv_expr (loop,
6720 gen_rtx_PLUS (mode,
6721 gen_rtx_MULT (mode,
6722 XEXP (arg0, 0),
6723 arg1),
6724 gen_rtx_MULT (mode,
6725 XEXP (arg0, 1),
6726 arg1)),
6727 ext_val, benefit);
6728
6729 default:
6730 abort ();
6731 }
6732
6733 case ASHIFT:
6734 /* Shift by constant is multiply by power of two. */
6735 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6736 return 0;
6737
6738 return
6739 simplify_giv_expr (loop,
6740 gen_rtx_MULT (mode,
6741 XEXP (x, 0),
6742 GEN_INT ((HOST_WIDE_INT) 1
6743 << INTVAL (XEXP (x, 1)))),
6744 ext_val, benefit);
6745
6746 case NEG:
6747 /* "-a" is "a * (-1)" */
6748 return simplify_giv_expr (loop,
6749 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6750 ext_val, benefit);
6751
6752 case NOT:
6753 /* "~a" is "-a - 1". Silly, but easy. */
6754 return simplify_giv_expr (loop,
6755 gen_rtx_MINUS (mode,
6756 gen_rtx_NEG (mode, XEXP (x, 0)),
6757 const1_rtx),
6758 ext_val, benefit);
6759
6760 case USE:
6761 /* Already in proper form for invariant. */
6762 return x;
6763
6764 case SIGN_EXTEND:
6765 case ZERO_EXTEND:
6766 case TRUNCATE:
6767 /* Conditionally recognize extensions of simple IVs. After we've
6768 computed loop traversal counts and verified the range of the
6769 source IV, we'll reevaluate this as a GIV. */
6770 if (*ext_val == NULL_RTX)
6771 {
6772 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6773 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
6774 {
6775 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6776 return arg0;
6777 }
6778 }
6779 goto do_default;
6780
6781 case REG:
6782 /* If this is a new register, we can't deal with it. */
6783 if (REGNO (x) >= max_reg_before_loop)
6784 return 0;
6785
6786 /* Check for biv or giv. */
6787 switch (REG_IV_TYPE (ivs, REGNO (x)))
6788 {
6789 case BASIC_INDUCT:
6790 return x;
6791 case GENERAL_INDUCT:
6792 {
6793 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6794
6795 /* Form expression from giv and add benefit. Ensure this giv
6796 can derive another and subtract any needed adjustment if so. */
6797
6798 /* Increasing the benefit here is risky. The only case in which it
6799 is arguably correct is if this is the only use of V. In other
6800 cases, this will artificially inflate the benefit of the current
6801 giv, and lead to suboptimal code. Thus, it is disabled, since
6802 potentially not reducing an only marginally beneficial giv is
6803 less harmful than reducing many givs that are not really
6804 beneficial. */
6805 {
6806 rtx single_use = regs->array[REGNO (x)].single_usage;
6807 if (single_use && single_use != const0_rtx)
6808 *benefit += v->benefit;
6809 }
6810
6811 if (v->cant_derive)
6812 return 0;
6813
6814 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6815 v->src_reg, v->mult_val),
6816 v->add_val);
6817
6818 if (v->derive_adjustment)
6819 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6820 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6821 if (*ext_val)
6822 {
6823 if (!v->ext_dependent)
6824 return arg0;
6825 }
6826 else
6827 {
6828 *ext_val = v->ext_dependent;
6829 return arg0;
6830 }
6831 return 0;
6832 }
6833
6834 default:
6835 do_default:
6836 /* If it isn't an induction variable, and it is invariant, we
6837 may be able to simplify things further by looking through
6838 the bits we just moved outside the loop. */
6839 if (loop_invariant_p (loop, x) == 1)
6840 {
6841 struct movable *m;
6842 struct loop_movables *movables = LOOP_MOVABLES (loop);
6843
6844 for (m = movables->head; m; m = m->next)
6845 if (rtx_equal_p (x, m->set_dest))
6846 {
6847 /* Ok, we found a match. Substitute and simplify. */
6848
6849 /* If we match another movable, we must use that, as
6850 this one is going away. */
6851 if (m->match)
6852 return simplify_giv_expr (loop, m->match->set_dest,
6853 ext_val, benefit);
6854
6855 /* If consec is nonzero, this is a member of a group of
6856 instructions that were moved together. We handle this
6857 case only to the point of seeking to the last insn and
6858 looking for a REG_EQUAL. Fail if we don't find one. */
6859 if (m->consec != 0)
6860 {
6861 int i = m->consec;
6862 tem = m->insn;
6863 do
6864 {
6865 tem = NEXT_INSN (tem);
6866 }
6867 while (--i > 0);
6868
6869 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6870 if (tem)
6871 tem = XEXP (tem, 0);
6872 }
6873 else
6874 {
6875 tem = single_set (m->insn);
6876 if (tem)
6877 tem = SET_SRC (tem);
6878 }
6879
6880 if (tem)
6881 {
6882 /* What we are most interested in is pointer
6883 arithmetic on invariants -- only take
6884 patterns we may be able to do something with. */
6885 if (GET_CODE (tem) == PLUS
6886 || GET_CODE (tem) == MULT
6887 || GET_CODE (tem) == ASHIFT
6888 || GET_CODE (tem) == CONST_INT
6889 || GET_CODE (tem) == SYMBOL_REF)
6890 {
6891 tem = simplify_giv_expr (loop, tem, ext_val,
6892 benefit);
6893 if (tem)
6894 return tem;
6895 }
6896 else if (GET_CODE (tem) == CONST
6897 && GET_CODE (XEXP (tem, 0)) == PLUS
6898 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6899 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6900 {
6901 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6902 ext_val, benefit);
6903 if (tem)
6904 return tem;
6905 }
6906 }
6907 break;
6908 }
6909 }
6910 break;
6911 }
6912
6913 /* Fall through to general case. */
6914 default:
6915 /* If invariant, return as USE (unless CONST_INT).
6916 Otherwise, not giv. */
6917 if (GET_CODE (x) == USE)
6918 x = XEXP (x, 0);
6919
6920 if (loop_invariant_p (loop, x) == 1)
6921 {
6922 if (GET_CODE (x) == CONST_INT)
6923 return x;
6924 if (GET_CODE (x) == CONST
6925 && GET_CODE (XEXP (x, 0)) == PLUS
6926 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6927 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6928 x = XEXP (x, 0);
6929 return gen_rtx_USE (mode, x);
6930 }
6931 else
6932 return 0;
6933 }
6934 }
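
/* As an example of the canonicalization above: for a hypothetical biv I,
the expression (i + 1) * 4 - 8 simplifies step by step from

(minus (mult (plus (reg i) (const_int 1)) (const_int 4))
(const_int 8))

to

(plus (mult (reg i) (const_int 4)) (const_int -4))

that is, the MINUS becomes a PLUS of a -1 multiple, the MULT is
distributed over the inner PLUS, and the constants fold to 4 - 8 = -4. */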
6935
6936 /* This routine folds invariants such that there is only ever one
6937 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6938
6939 static rtx
6940 sge_plus_constant (rtx x, rtx c)
6941 {
6942 if (GET_CODE (x) == CONST_INT)
6943 return GEN_INT (INTVAL (x) + INTVAL (c));
6944 else if (GET_CODE (x) != PLUS)
6945 return gen_rtx_PLUS (GET_MODE (x), x, c);
6946 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6947 {
6948 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6949 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6950 }
6951 else if (GET_CODE (XEXP (x, 0)) == PLUS
6952 || GET_CODE (XEXP (x, 1)) != PLUS)
6953 {
6954 return gen_rtx_PLUS (GET_MODE (x),
6955 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6956 }
6957 else
6958 {
6959 return gen_rtx_PLUS (GET_MODE (x),
6960 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6961 }
6962 }
6963
6964 static rtx
6965 sge_plus (enum machine_mode mode, rtx x, rtx y)
6966 {
6967 while (GET_CODE (y) == PLUS)
6968 {
6969 rtx a = XEXP (y, 0);
6970 if (GET_CODE (a) == CONST_INT)
6971 x = sge_plus_constant (x, a);
6972 else
6973 x = gen_rtx_PLUS (mode, x, a);
6974 y = XEXP (y, 1);
6975 }
6976 if (GET_CODE (y) == CONST_INT)
6977 x = sge_plus_constant (x, y);
6978 else
6979 x = gen_rtx_PLUS (mode, x, y);
6980 return x;
6981 }
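
/* E.g. sge_plus_constant applied to a hypothetical sum (sym + 3) and
the constant 5 yields (sym + 8), keeping a single CONST_INT at the top
level of the summation; sge_plus merges two such sums the same way,
folding any CONST_INT operands it encounters. */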
6982 \f
6983 /* Help detect a giv that is calculated by several consecutive insns;
6984 for example,
6985 giv = biv * M
6986 giv = giv + A
6987 The caller has already identified the first insn P as having a giv as dest;
6988 we check that all other insns that set the same register follow
6989 immediately after P, that they alter nothing else,
6990 and that the result of the last is still a giv.
6991
6992 The value is 0 if the reg set in P is not really a giv.
6993 Otherwise, the value is the amount gained by eliminating
6994 all the consecutive insns that compute the value.
6995
6996 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6997 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6998
6999 The coefficients of the ultimate giv value are stored in
7000 *MULT_VAL and *ADD_VAL. */
7001
7002 static int
7003 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
7004 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
7005 rtx *ext_val, rtx *last_consec_insn)
7006 {
7007 struct loop_ivs *ivs = LOOP_IVS (loop);
7008 struct loop_regs *regs = LOOP_REGS (loop);
7009 int count;
7010 enum rtx_code code;
7011 int benefit;
7012 rtx temp;
7013 rtx set;
7014
7015 /* Indicate that this is a giv so that we can update the value produced in
7016 each insn of the multi-insn sequence.
7017
7018 This induction structure will be used only by the call to
7019 general_induction_var below, so we can allocate it on our stack.
7020 If this is a giv, our caller will replace the induct var entry with
7021 a new induction structure. */
7022 struct induction *v;
7023
7024 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7025 return 0;
7026
7027 v = alloca (sizeof (struct induction));
7028 v->src_reg = src_reg;
7029 v->mult_val = *mult_val;
7030 v->add_val = *add_val;
7031 v->benefit = first_benefit;
7032 v->cant_derive = 0;
7033 v->derive_adjustment = 0;
7034 v->ext_dependent = NULL_RTX;
7035
7036 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7037 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7038
7039 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7040
7041 while (count > 0)
7042 {
7043 p = NEXT_INSN (p);
7044 code = GET_CODE (p);
7045
7046 /* If libcall, skip to end of call sequence. */
7047 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7048 p = XEXP (temp, 0);
7049
7050 if (code == INSN
7051 && (set = single_set (p))
7052 && REG_P (SET_DEST (set))
7053 && SET_DEST (set) == dest_reg
7054 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7055 add_val, mult_val, ext_val, 0,
7056 &benefit, VOIDmode)
7057 /* Giv created by equivalent expression. */
7058 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7059 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7060 add_val, mult_val, ext_val, 0,
7061 &benefit, VOIDmode)))
7062 && src_reg == v->src_reg)
7063 {
7064 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7065 benefit += libcall_benefit (p);
7066
7067 count--;
7068 v->mult_val = *mult_val;
7069 v->add_val = *add_val;
7070 v->benefit += benefit;
7071 }
7072 else if (code != NOTE)
7073 {
7074 /* Allow insns that set something other than this giv to a
7075 constant. Such insns are needed on machines which cannot
7076 include long constants and should not disqualify a giv. */
7077 if (code == INSN
7078 && (set = single_set (p))
7079 && SET_DEST (set) != dest_reg
7080 && CONSTANT_P (SET_SRC (set)))
7081 continue;
7082
7083 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7084 return 0;
7085 }
7086 }
7087
7088 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7089 *last_consec_insn = p;
7090 return v->benefit;
7091 }
7092 \f
7093 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7094 represented by G1. If no such expression can be found, or it is clear that
7095 it cannot possibly be a valid address, 0 is returned.
7096
7097 To perform the computation, we note that
7098 G1 = x * v + a and
7099 G2 = y * v + b
7100 where `v' is the biv.
7101
7102 So G2 = (y/x) * G1 + (b - a*y/x).
7103
7104 Note that MULT = y/x.
7105
7106 Update: A and B are now allowed to be additive expressions such that
7107 B contains all variables in A. That is, computing B-A will not require
7108 subtracting variables. */
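/* Worked instance of the identity above (illustrative numbers): with
   G1 = 4*v + 8 and G2 = 12*v + 30 we get MULT = y/x = 3 and
   G2 = 3*G1 + (30 - 8*3) = 3*G1 + 6; indeed 3*(4*v + 8) + 6
   = 12*v + 30.  */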
7109
7110 static rtx
7111 express_from_1 (rtx a, rtx b, rtx mult)
7112 {
7113 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7114
7115 if (mult == const0_rtx)
7116 return b;
7117
7118 /* If MULT is not 1, we cannot handle A with non-constants, since we
7119 would then be required to subtract multiples of the registers in A.
7120 This is theoretically possible, and may even apply to some Fortran
7121 constructs, but it is a lot of work and we do not attempt it here. */
7122
7123 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7124 return NULL_RTX;
7125
7126 /* In general these structures are sorted top to bottom (down the PLUS
7127 chain), but not left to right across the PLUS. If B is a higher
7128 order giv than A, we can strip one level and recurse. If A is higher
7129 order, we'll eventually bail out, but won't know that until the end.
7130 If they are the same, we'll strip one level around this loop. */
7131
7132 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7133 {
7134 rtx ra, rb, oa, ob, tmp;
7135
7136 ra = XEXP (a, 0), oa = XEXP (a, 1);
7137 if (GET_CODE (ra) == PLUS)
7138 tmp = ra, ra = oa, oa = tmp;
7139
7140 rb = XEXP (b, 0), ob = XEXP (b, 1);
7141 if (GET_CODE (rb) == PLUS)
7142 tmp = rb, rb = ob, ob = tmp;
7143
7144 if (rtx_equal_p (ra, rb))
7145 /* We matched: remove one reg completely. */
7146 a = oa, b = ob;
7147 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7148 /* An alternate match. */
7149 a = oa, b = rb;
7150 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7151 /* An alternate match. */
7152 a = ra, b = ob;
7153 else
7154 {
7155 /* Indicates an extra register in B. Strip one level from B and
7156 recurse, hoping B was the higher order expression. */
7157 ob = express_from_1 (a, ob, mult);
7158 if (ob == NULL_RTX)
7159 return NULL_RTX;
7160 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7161 }
7162 }
7163
7164 /* Here we are at the last level of A; go through the cases hoping to
7165 get rid of everything but a constant. */
7166
7167 if (GET_CODE (a) == PLUS)
7168 {
7169 rtx ra, oa;
7170
7171 ra = XEXP (a, 0), oa = XEXP (a, 1);
7172 if (rtx_equal_p (oa, b))
7173 oa = ra;
7174 else if (!rtx_equal_p (ra, b))
7175 return NULL_RTX;
7176
7177 if (GET_CODE (oa) != CONST_INT)
7178 return NULL_RTX;
7179
7180 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7181 }
7182 else if (GET_CODE (a) == CONST_INT)
7183 {
7184 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7185 }
7186 else if (CONSTANT_P (a))
7187 {
7188 enum machine_mode mode_a = GET_MODE (a);
7189 enum machine_mode mode_b = GET_MODE (b);
7190 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7191 return simplify_gen_binary (MINUS, mode, b, a);
7192 }
7193 else if (GET_CODE (b) == PLUS)
7194 {
7195 if (rtx_equal_p (a, XEXP (b, 0)))
7196 return XEXP (b, 1);
7197 else if (rtx_equal_p (a, XEXP (b, 1)))
7198 return XEXP (b, 0);
7199 else
7200 return NULL_RTX;
7201 }
7202 else if (rtx_equal_p (a, b))
7203 return const0_rtx;
7204
7205 return NULL_RTX;
7206 }
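/* Simple hypothetical trace: A = (plus (reg I) (const_int 4)),
   B = (plus (reg I) (const_int 10)), MULT = 1.  The loop strips the
   matching (reg I) from both sides, leaving A = (const_int 4) and
   B = (const_int 10), and the CONST_INT case returns
   plus_constant (B, -4), i.e. (const_int 6) -- exactly B - A.  */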
7207
7208 rtx
7209 express_from (struct induction *g1, struct induction *g2)
7210 {
7211 rtx mult, add;
7212
7213 /* The value that G1 will be multiplied by must be a constant integer. Also,
7214 the only chance we have of getting a valid address is if y/x (see above
7215 for notation) is also an integer. */
7216 if (GET_CODE (g1->mult_val) == CONST_INT
7217 && GET_CODE (g2->mult_val) == CONST_INT)
7218 {
7219 if (g1->mult_val == const0_rtx
7220 || (g1->mult_val == constm1_rtx
7221 && INTVAL (g2->mult_val)
7222 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
7223 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7224 return NULL_RTX;
7225 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7226 }
7227 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7228 mult = const1_rtx;
7229 else
7230 {
7231 /* ??? Find out if one is a multiple of the other? */
7232 return NULL_RTX;
7233 }
7234
7235 add = express_from_1 (g1->add_val, g2->add_val, mult);
7236 if (add == NULL_RTX)
7237 {
7238 /* Failed. If we've got a multiplication factor between G1 and G2,
7239 scale G1's addend and try again. */
7240 if (INTVAL (mult) > 1)
7241 {
7242 rtx g1_add_val = g1->add_val;
7243 if (GET_CODE (g1_add_val) == MULT
7244 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7245 {
7246 HOST_WIDE_INT m;
7247 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7248 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7249 XEXP (g1_add_val, 0), GEN_INT (m));
7250 }
7251 else
7252 {
7253 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7254 mult);
7255 }
7256
7257 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7258 }
7259 }
7260 if (add == NULL_RTX)
7261 return NULL_RTX;
7262
7263 /* Form simplified final result. */
7264 if (mult == const0_rtx)
7265 return add;
7266 else if (mult == const1_rtx)
7267 mult = g1->dest_reg;
7268 else
7269 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7270
7271 if (add == const0_rtx)
7272 return mult;
7273 else
7274 {
7275 if (GET_CODE (add) == PLUS
7276 && CONSTANT_P (XEXP (add, 1)))
7277 {
7278 rtx tem = XEXP (add, 1);
7279 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7280 add = tem;
7281 }
7282
7283 return gen_rtx_PLUS (g2->mode, mult, add);
7284 }
7285 }
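/* Illustrative case for the rescaling retry above: if G1 = v + A
   (A a register) and G2 = 2*v + 2*A, then MULT = 2 and the first
   express_from_1 call fails because A is not constant.  Rescaling
   G1's addend to (mult A 2) matches G2's addend exactly, so ADD
   becomes 0 and the result is just (mult G1_reg 2).  */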
7286 \f
7287 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7288 represented by G1. This indicates that G2 should be combined with G1 and
7289 that G2 can use (either directly or via an address expression) a register
7290 used to represent G1. */
7291
7292 static rtx
7293 combine_givs_p (struct induction *g1, struct induction *g2)
7294 {
7295 rtx comb, ret;
7296
7297 /* With the introduction of ext dependent givs, we must take care with modes.
7298 G2 must not use a wider mode than G1. */
7299 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7300 return NULL_RTX;
7301
7302 ret = comb = express_from (g1, g2);
7303 if (comb == NULL_RTX)
7304 return NULL_RTX;
7305 if (g1->mode != g2->mode)
7306 ret = gen_lowpart (g2->mode, comb);
7307
7308 /* If these givs are identical, they can be combined. We use the results
7309 of express_from because the addends are not in a canonical form, so
7310 rtx_equal_p is a weaker test. */
7311 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7312 combination to be the other way round. */
7313 if (comb == g1->dest_reg
7314 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7315 {
7316 return ret;
7317 }
7318
7319 /* If G2 can be expressed as a function of G1 and that function is valid
7320 as an address and no more expensive than using a register for G2,
7321 the expression of G2 in terms of G1 can be used. */
7322 if (ret != NULL_RTX
7323 && g2->giv_type == DEST_ADDR
7324 && memory_address_p (GET_MODE (g2->mem), ret))
7325 return ret;
7326
7327 return NULL_RTX;
7328 }
7329 \f
7330 /* Check each extension dependent giv in this class to see if its
7331 root biv is safe from wrapping in the interior mode, which would
7332 make the giv illegal. */
7333
7334 static void
7335 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7336 {
7337 struct loop_info *loop_info = LOOP_INFO (loop);
7338 int ze_ok = 0, se_ok = 0, info_ok = 0;
7339 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7340 HOST_WIDE_INT start_val;
7341 unsigned HOST_WIDE_INT u_end_val = 0;
7342 unsigned HOST_WIDE_INT u_start_val = 0;
7343 rtx incr = pc_rtx;
7344 struct induction *v;
7345
7346 /* Make sure the iteration data is available. We must have
7347 constants in order to be certain of no overflow. */
7348 if (loop_info->n_iterations > 0
7349 && bl->initial_value
7350 && GET_CODE (bl->initial_value) == CONST_INT
7351 && (incr = biv_total_increment (bl))
7352 && GET_CODE (incr) == CONST_INT
7353 /* Make sure the host can represent the arithmetic. */
7354 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7355 {
7356 unsigned HOST_WIDE_INT abs_incr, total_incr;
7357 HOST_WIDE_INT s_end_val;
7358 int neg_incr;
7359
7360 info_ok = 1;
7361 start_val = INTVAL (bl->initial_value);
7362 u_start_val = start_val;
7363
7364 neg_incr = 0, abs_incr = INTVAL (incr);
7365 if (INTVAL (incr) < 0)
7366 neg_incr = 1, abs_incr = -abs_incr;
7367 total_incr = abs_incr * loop_info->n_iterations;
7368
7369 /* Check for host arithmetic overflow. */
7370 if (total_incr / loop_info->n_iterations == abs_incr)
7371 {
7372 unsigned HOST_WIDE_INT u_max;
7373 HOST_WIDE_INT s_max;
7374
7375 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7376 s_end_val = u_end_val;
7377 u_max = GET_MODE_MASK (biv_mode);
7378 s_max = u_max >> 1;
7379
7380 /* Check zero extension of biv ok. */
7381 if (start_val >= 0
7382 /* Check for host arithmetic overflow. */
7383 && (neg_incr
7384 ? u_end_val < u_start_val
7385 : u_end_val > u_start_val)
7386 /* Check for target arithmetic overflow. */
7387 && (neg_incr
7388 ? 1 /* taken care of with host overflow */
7389 : u_end_val <= u_max))
7390 {
7391 ze_ok = 1;
7392 }
7393
7394 /* Check sign extension of biv ok. */
7395 /* ??? While it is true that overflow with signed and pointer
7396 arithmetic is undefined, I fear too many programmers don't
7397 keep this fact in mind -- myself included on occasion.
7398 So we check signed overflow explicitly rather than rely on it. */
7399 if (start_val >= -s_max - 1
7400 /* Check for host arithmetic overflow. */
7401 && (neg_incr
7402 ? s_end_val < start_val
7403 : s_end_val > start_val)
7404 /* Check for target arithmetic overflow. */
7405 && (neg_incr
7406 ? s_end_val >= -s_max - 1
7407 : s_end_val <= s_max))
7408 {
7409 se_ok = 1;
7410 }
7411 }
7412 }
7413
7414 /* If we know the BIV is compared at run-time against an
7415 invariant value, and the increment is +/- 1, we may also
7416 be able to prove that the BIV cannot overflow. */
7417 else if (bl->biv->src_reg == loop_info->iteration_var
7418 && loop_info->comparison_value
7419 && loop_invariant_p (loop, loop_info->comparison_value)
7420 && (incr = biv_total_increment (bl))
7421 && GET_CODE (incr) == CONST_INT)
7422 {
7423 /* If the increment is +1, and the exit test is a <,
7424 the BIV cannot overflow. (For <=, we have the
7425 problematic case that the comparison value might
7426 be the maximum value of the range.) */
7427 if (INTVAL (incr) == 1)
7428 {
7429 if (loop_info->comparison_code == LT)
7430 se_ok = ze_ok = 1;
7431 else if (loop_info->comparison_code == LTU)
7432 ze_ok = 1;
7433 }
7434
7435 /* Likewise for increment -1 and exit test >. */
7436 if (INTVAL (incr) == -1)
7437 {
7438 if (loop_info->comparison_code == GT)
7439 se_ok = ze_ok = 1;
7440 else if (loop_info->comparison_code == GTU)
7441 ze_ok = 1;
7442 }
7443 }
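/* E.g. (illustrative) for (i = start; i < n; i++) the biv's final
   value is at most n, which is representable, so it cannot wrap;
   with i <= n and n equal to the mode's maximum, the last increment
   would wrap -- hence only LT/LTU (and GT/GTU for decrement) are
   accepted above.  */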
7444
7445 /* Invalidate givs that fail the tests. */
7446 for (v = bl->giv; v; v = v->next_iv)
7447 if (v->ext_dependent)
7448 {
7449 enum rtx_code code = GET_CODE (v->ext_dependent);
7450 int ok = 0;
7451
7452 switch (code)
7453 {
7454 case SIGN_EXTEND:
7455 ok = se_ok;
7456 break;
7457 case ZERO_EXTEND:
7458 ok = ze_ok;
7459 break;
7460
7461 case TRUNCATE:
7462 /* We don't know whether this value is being used as either
7463 signed or unsigned, so to safely truncate we must satisfy
7464 both. The initial check here verifies the BIV itself;
7465 once that is successful we may check its range wrt the
7466 derived GIV. This works only if we were able to determine
7467 constant start and end values above. */
7468 if (se_ok && ze_ok && info_ok)
7469 {
7470 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7471 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7472
7473 /* We know from the above that both endpoints are nonnegative,
7474 and that there is no wrapping. Verify that both endpoints
7475 are within the (signed) range of the outer mode. */
7476 if (u_start_val <= max && u_end_val <= max)
7477 ok = 1;
7478 }
7479 break;
7480
7481 default:
7482 abort ();
7483 }
7484
7485 if (ok)
7486 {
7487 if (loop_dump_stream)
7488 {
7489 fprintf (loop_dump_stream,
7490 "Verified ext dependent giv at %d of reg %d\n",
7491 INSN_UID (v->insn), bl->regno);
7492 }
7493 }
7494 else
7495 {
7496 if (loop_dump_stream)
7497 {
7498 const char *why;
7499
7500 if (info_ok)
7501 why = "biv iteration values overflowed";
7502 else
7503 {
7504 if (incr == pc_rtx)
7505 incr = biv_total_increment (bl);
7506 if (incr == const1_rtx)
7507 why = "biv iteration info incomplete; incr by 1";
7508 else
7509 why = "biv iteration info incomplete";
7510 }
7511
7512 fprintf (loop_dump_stream,
7513 "Failed ext dependent giv at %d, %s\n",
7514 INSN_UID (v->insn), why);
7515 }
7516 v->ignore = 1;
7517 bl->all_reduced = 0;
7518 }
7519 }
7520 }
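/* Worked bounds check (illustrative numbers): for a QImode biv with
   initial value 10, increment 5 and 20 iterations, u_end_val is
   10 + 5*20 = 110.  110 <= 255 = u_max, so zero extension is safe
   (ze_ok), and 110 <= 127 = s_max with no signed wrap, so sign
   extension is safe too (se_ok).  */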
7521
7522 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7523
7524 rtx
7525 extend_value_for_giv (struct induction *v, rtx value)
7526 {
7527 rtx ext_dep = v->ext_dependent;
7528
7529 if (! ext_dep)
7530 return value;
7531
7532 /* Recall that check_ext_dependent_givs verified that the known bounds
7533 of a biv did not overflow or wrap with respect to the extension for
7534 the giv. Therefore, constants need no additional adjustment. */
7535 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7536 return value;
7537
7538 /* Otherwise, we must adjust the value to compensate for the
7539 differing modes of the biv and the giv. */
7540 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7541 }
7542 \f
7543 struct combine_givs_stats
7544 {
7545 int giv_number;
7546 int total_benefit;
7547 };
7548
7549 static int
7550 cmp_combine_givs_stats (const void *xp, const void *yp)
7551 {
7552 const struct combine_givs_stats * const x =
7553 (const struct combine_givs_stats *) xp;
7554 const struct combine_givs_stats * const y =
7555 (const struct combine_givs_stats *) yp;
7556 int d;
7557 d = y->total_benefit - x->total_benefit;
7558 /* Stabilize the sort. */
7559 if (!d)
7560 d = x->giv_number - y->giv_number;
7561 return d;
7562 }
7563
7564 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7565 any other. If so, point SAME to the giv combined with and set NEW_REG to
7566 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7567 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
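/* Sketch of the procedure below (hypothetical three-giv case): if g1
   and g2 can each be rewritten in terms of g0, g0's row of CAN_COMBINE
   is non-null for both, so g0 sorts first by total benefit; g1 and g2
   are then marked (same = g0) and given their combining expressions as
   new_reg, and the restart re-sorts the remaining candidates with the
   consumed benefits removed.  */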
7568
7569 static void
7570 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7571 {
7572 /* Additional benefit to add for being combined multiple times. */
7573 const int extra_benefit = 3;
7574
7575 struct induction *g1, *g2, **giv_array;
7576 int i, j, k, giv_count;
7577 struct combine_givs_stats *stats;
7578 rtx *can_combine;
7579
7580 /* Count givs, because bl->giv_count is incorrect here. */
7581 giv_count = 0;
7582 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7583 if (!g1->ignore)
7584 giv_count++;
7585
7586 giv_array = alloca (giv_count * sizeof (struct induction *));
7587 i = 0;
7588 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7589 if (!g1->ignore)
7590 giv_array[i++] = g1;
7591
7592 stats = xcalloc (giv_count, sizeof (*stats));
7593 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
7594
7595 for (i = 0; i < giv_count; i++)
7596 {
7597 int this_benefit;
7598 rtx single_use;
7599
7600 g1 = giv_array[i];
7601 stats[i].giv_number = i;
7602
7603 /* If a DEST_REG GIV is used only once, do not allow it to combine
7604 with anything, for in doing so we will gain nothing that cannot
7605 be had by simply letting the GIV with which we would have combined
7606 be reduced on its own. The loss shows up in particular with
7607 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7608 be seen elsewhere as well. */
7609 if (g1->giv_type == DEST_REG
7610 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7611 && single_use != const0_rtx)
7612 continue;
7613
7614 this_benefit = g1->benefit;
7615 /* Add an additional weight for zero addends. */
7616 if (g1->no_const_addval)
7617 this_benefit += 1;
7618
7619 for (j = 0; j < giv_count; j++)
7620 {
7621 rtx this_combine;
7622
7623 g2 = giv_array[j];
7624 if (g1 != g2
7625 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7626 {
7627 can_combine[i * giv_count + j] = this_combine;
7628 this_benefit += g2->benefit + extra_benefit;
7629 }
7630 }
7631 stats[i].total_benefit = this_benefit;
7632 }
7633
7634 /* Iterate, combining until we can't. */
7635 restart:
7636 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7637
7638 if (loop_dump_stream)
7639 {
7640 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7641 for (k = 0; k < giv_count; k++)
7642 {
7643 g1 = giv_array[stats[k].giv_number];
7644 if (!g1->combined_with && !g1->same)
7645 fprintf (loop_dump_stream, " {%d, %d}",
7646 INSN_UID (giv_array[stats[k].giv_number]->insn),
7647 stats[k].total_benefit);
7648 }
7649 putc ('\n', loop_dump_stream);
7650 }
7651
7652 for (k = 0; k < giv_count; k++)
7653 {
7654 int g1_add_benefit = 0;
7655
7656 i = stats[k].giv_number;
7657 g1 = giv_array[i];
7658
7659 /* If it has already been combined, skip. */
7660 if (g1->combined_with || g1->same)
7661 continue;
7662
7663 for (j = 0; j < giv_count; j++)
7664 {
7665 g2 = giv_array[j];
7666 if (g1 != g2 && can_combine[i * giv_count + j]
7667 /* If it has already been combined, skip. */
7668 && ! g2->same && ! g2->combined_with)
7669 {
7670 int l;
7671
7672 g2->new_reg = can_combine[i * giv_count + j];
7673 g2->same = g1;
7674 /* For the destination, we may now substitute a memory expression
7675 for the register. This changes the costs considerably, so add the
7676 compensation. */
7677 if (g2->giv_type == DEST_ADDR)
7678 g2->benefit = (g2->benefit + reg_address_cost
7679 - address_cost (g2->new_reg,
7680 GET_MODE (g2->mem)));
7681 g1->combined_with++;
7682 g1->lifetime += g2->lifetime;
7683
7684 g1_add_benefit += g2->benefit;
7685
7686 /* ??? The new final_[bg]iv_value code does a much better job
7687 of finding replaceable givs, and hence this code may no
7688 longer be necessary. */
7689 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7690 g1_add_benefit -= copy_cost;
7691
7692 /* To help optimize the next set of combinations, remove
7693 this giv from the benefits of other potential mates. */
7694 for (l = 0; l < giv_count; ++l)
7695 {
7696 int m = stats[l].giv_number;
7697 if (can_combine[m * giv_count + j])
7698 stats[l].total_benefit -= g2->benefit + extra_benefit;
7699 }
7700
7701 if (loop_dump_stream)
7702 fprintf (loop_dump_stream,
7703 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7704 INSN_UID (g2->insn), INSN_UID (g1->insn),
7705 g1->benefit, g1_add_benefit, g1->lifetime);
7706 }
7707 }
7708
7709 /* To help optimize the next set of combinations, remove
7710 this giv from the benefits of other potential mates. */
7711 if (g1->combined_with)
7712 {
7713 for (j = 0; j < giv_count; ++j)
7714 {
7715 int m = stats[j].giv_number;
7716 if (can_combine[m * giv_count + i])
7717 stats[j].total_benefit -= g1->benefit + extra_benefit;
7718 }
7719
7720 g1->benefit += g1_add_benefit;
7721
7722 /* We've finished with this giv, and everything it touched.
7723 Restart the combination so that proper weights for the
7724 rest of the givs are properly taken into account. */
7725 /* ??? Ideally we would compact the arrays at this point, so
7726 as to not cover old ground. But sanely compacting
7727 can_combine is tricky. */
7728 goto restart;
7729 }
7730 }
7731
7732 /* Clean up. */
7733 free (stats);
7734 free (can_combine);
7735 }
7736 \f
7737 /* Generate sequence for REG = B * M + A. B is the initial value of
7738 the basic induction variable, M a multiplicative constant, A an
7739 additive constant and REG the destination register. */
7740
7741 static rtx
7742 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7743 {
7744 rtx seq;
7745 rtx result;
7746
7747 start_sequence ();
7748 /* Use unsigned arithmetic. */
7749 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7750 if (reg != result)
7751 emit_move_insn (reg, result);
7752 seq = get_insns ();
7753 end_sequence ();
7754
7755 return seq;
7756 }
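/* Usage sketch (hypothetical operands): gen_add_mult (biv_init,
   GEN_INT (4), GEN_INT (12), reg) returns a sequence computing
   reg = biv_init * 4 + 12; on targets without a cheap multiply this
   typically expands to a shift followed by an add.  */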
7757
7758
7759 /* Update registers created in insn sequence SEQ. */
7760
7761 static void
7762 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7763 {
7764 rtx insn;
7765
7766 /* Update register info for alias analysis. */
7767
7768 insn = seq;
7769 while (insn != NULL_RTX)
7770 {
7771 rtx set = single_set (insn);
7772
7773 if (set && REG_P (SET_DEST (set)))
7774 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7775
7776 insn = NEXT_INSN (insn);
7777 }
7778 }
7779
7780
7781 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7782 is the initial value of the basic induction variable, M a
7783 multiplicative constant, A an additive constant and REG the
7784 destination register. */
7785
7786 void
7787 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7788 rtx reg, basic_block before_bb, rtx before_insn)
7789 {
7790 rtx seq;
7791
7792 if (! before_insn)
7793 {
7794 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7795 return;
7796 }
7797
7798 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7799 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7800
7801 /* Increase the lifetime of any invariants moved further in code. */
7802 update_reg_last_use (a, before_insn);
7803 update_reg_last_use (b, before_insn);
7804 update_reg_last_use (m, before_insn);
7805
7806 /* It is possible that the expansion created lots of new registers.
7807 Iterate over the sequence we just created and record them all. We
7808 must do this before inserting the sequence. */
7809 loop_regs_update (loop, seq);
7810
7811 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7812 }
7813
7814
7815 /* Emit insns after the end of the loop to set REG = B * M + A. B is the
7816 initial value of the basic induction variable, M a multiplicative
7817 constant, A an additive constant and REG the destination
7818 register. */
7819
7820 void
7821 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7822 {
7823 rtx seq;
7824
7825 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7826 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7827
7828 /* Increase the lifetime of any invariants moved further in code.
7829 ??? Is this really necessary? */
7830 update_reg_last_use (a, loop->sink);
7831 update_reg_last_use (b, loop->sink);
7832 update_reg_last_use (m, loop->sink);
7833
7834 /* It is possible that the expansion created lots of new registers.
7835 Iterate over the sequence we just created and record them all. We
7836 must do this before inserting the sequence. */
7837 loop_regs_update (loop, seq);
7838
7839 loop_insn_sink (loop, seq);
7840 }
7841
7842
7843 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the initial
7844 value of the basic induction variable, M a multiplicative constant,
7845 A an additive constant and REG the destination register. */
7846
7847 void
7848 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7849 {
7850 rtx seq;
7851
7852 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7853 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7854
7855 /* It is possible that the expansion created lots of new registers.
7856 Iterate over the sequence we just created and record them all. We
7857 must do this before inserting the sequence. */
7858 loop_regs_update (loop, seq);
7859
7860 loop_insn_hoist (loop, seq);
7861 }
7862
7863
7864
7865 /* Similar to gen_add_mult, but compute cost rather than generating
7866 sequence. */
7867
7868 static int
7869 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7870 {
7871 int cost = 0;
7872 rtx last, result;
7873
7874 start_sequence ();
7875 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7876 if (reg != result)
7877 emit_move_insn (reg, result);
7878 last = get_last_insn ();
7879 while (last)
7880 {
7881 rtx t = single_set (last);
7882 if (t)
7883 cost += rtx_cost (SET_SRC (t), SET);
7884 last = PREV_INSN (last);
7885 }
7886 end_sequence ();
7887 return cost;
7888 }
7889 \f
7890 /* Test whether A * B can be computed without
7891 an actual multiply insn. Value is 1 if so.
7892
7893 ??? This function stinks because it generates a ton of wasted RTL
7894 ??? and as a result fragments GC memory to no end. There are other
7895 ??? places in the compiler which are invoked a lot and do the same
7896 ??? thing, generate wasted RTL just to see if something is possible. */
7897
7898 static int
7899 product_cheap_p (rtx a, rtx b)
7900 {
7901 rtx tmp;
7902 int win, n_insns;
7903
7904 /* If only one is constant, make it B. */
7905 if (GET_CODE (a) == CONST_INT)
7906 tmp = a, a = b, b = tmp;
7907
7908 /* If first constant, both constant, so don't need multiply. */
7909 if (GET_CODE (a) == CONST_INT)
7910 return 1;
7911
7912 /* If second not constant, neither is constant, so would need multiply. */
7913 if (GET_CODE (b) != CONST_INT)
7914 return 0;
7915
7916 /* One operand is constant, so we might not need a multiply insn. Generate
7917 the code for the multiply and see if a call, a multiply, or a long
7918 sequence of insns is generated. */
7919
7920 start_sequence ();
7921 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7922 tmp = get_insns ();
7923 end_sequence ();
7924
7925 win = 1;
7926 if (INSN_P (tmp))
7927 {
7928 n_insns = 0;
7929 while (tmp != NULL_RTX)
7930 {
7931 rtx next = NEXT_INSN (tmp);
7932
7933 if (++n_insns > 3
7934 || GET_CODE (tmp) != INSN
7935 || (GET_CODE (PATTERN (tmp)) == SET
7936 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7937 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7938 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7939 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7940 {
7941 win = 0;
7942 break;
7943 }
7944
7945 tmp = next;
7946 }
7947 }
7948 else if (GET_CODE (tmp) == SET
7949 && GET_CODE (SET_SRC (tmp)) == MULT)
7950 win = 0;
7951 else if (GET_CODE (tmp) == PARALLEL
7952 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7953 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7954 win = 0;
7955
7956 return win;
7957 }
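/* E.g. (target dependent, illustrative only): product_cheap_p
   (reg, GEN_INT (8)) is normally 1, since reg * 8 expands to a single
   shift, while multiplying by a large prime constant may expand to an
   actual mult insn or a long shift/add chain and return 0.  */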
7958 \f
7959 /* Check to see if loop can be terminated by a "decrement and branch until
7960 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7961 Also try reversing an increment loop to a decrement loop
7962 to see if the optimization can be performed.
7963 Value is nonzero if optimization was performed. */
7964
7965 /* This is useful even if the architecture doesn't have such an insn,
7966 because it might change a loop which increments from 0 to n to a loop
7967 which decrements from n to 0. A loop that decrements to zero is usually
7968 faster than one that increments from zero. */
7969
7970 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7971 such as approx_final_value, biv_total_increment, loop_iterations, and
7972 final_[bg]iv_value. */
7973
7974 static int
7975 check_dbra_loop (struct loop *loop, int insn_count)
7976 {
7977 struct loop_info *loop_info = LOOP_INFO (loop);
7978 struct loop_regs *regs = LOOP_REGS (loop);
7979 struct loop_ivs *ivs = LOOP_IVS (loop);
7980 struct iv_class *bl;
7981 rtx reg;
7982 enum machine_mode mode;
7983 rtx jump_label;
7984 rtx final_value;
7985 rtx start_value;
7986 rtx new_add_val;
7987 rtx comparison;
7988 rtx before_comparison;
7989 rtx p;
7990 rtx jump;
7991 rtx first_compare;
7992 int compare_and_branch;
7993 rtx loop_start = loop->start;
7994 rtx loop_end = loop->end;
7995
7996 /* If last insn is a conditional branch, and the insn before tests a
7997 register value, try to optimize it. Otherwise, we can't do anything. */
7998
7999 jump = PREV_INSN (loop_end);
8000 comparison = get_condition_for_loop (loop, jump);
8001 if (comparison == 0)
8002 return 0;
8003 if (!onlyjump_p (jump))
8004 return 0;
8005
8006 /* Try to compute whether the compare/branch at the loop end is one or
8007 two instructions. */
8008 get_condition (jump, &first_compare, false);
8009 if (first_compare == jump)
8010 compare_and_branch = 1;
8011 else if (first_compare == prev_nonnote_insn (jump))
8012 compare_and_branch = 2;
8013 else
8014 return 0;
8015
8016 {
8017 /* If more than one condition is present to control the loop, then
8018 do not proceed, as this function does not know how to rewrite
8019 loop tests with more than one condition.
8020
8021 Look backwards from the first insn in the last comparison
8022 sequence and see if we've got another comparison sequence. */
8023
8024 rtx jump1;
8025 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8026 if (GET_CODE (jump1) == JUMP_INSN)
8027 return 0;
8028 }
8029
8030 /* Check all of the bivs to see if the compare uses one of them.
8031 Skip bivs set more than once because we can't guarantee that
8032 it will be zero on the last iteration. Also skip if the biv is
8033 used between its update and the test insn. */
8034
8035 for (bl = ivs->list; bl; bl = bl->next)
8036 {
8037 if (bl->biv_count == 1
8038 && ! bl->biv->maybe_multiple
8039 && bl->biv->dest_reg == XEXP (comparison, 0)
8040 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8041 first_compare))
8042 break;
8043 }
8044
8045 /* Try swapping the comparison to identify a suitable biv. */
8046 if (!bl)
8047 for (bl = ivs->list; bl; bl = bl->next)
8048 if (bl->biv_count == 1
8049 && ! bl->biv->maybe_multiple
8050 && bl->biv->dest_reg == XEXP (comparison, 1)
8051 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8052 first_compare))
8053 {
8054 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
8055 VOIDmode,
8056 XEXP (comparison, 1),
8057 XEXP (comparison, 0));
8058 break;
8059 }
8060
8061 if (! bl)
8062 return 0;
8063
8064 /* Look for the case where the basic induction variable is always
8065 nonnegative, and equals zero on the last iteration.
8066 In this case, add a reg_note REG_NONNEG, which allows the
8067 m68k DBRA instruction to be used. */
8068
8069 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
8070 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8071 && GET_CODE (bl->biv->add_val) == CONST_INT
8072 && INTVAL (bl->biv->add_val) < 0)
8073 {
8074 /* Initial value must be greater than 0,
8075 init_val % -dec_value == 0 to ensure that it equals zero on
8076 the last iteration */
8077
8078 if (GET_CODE (bl->initial_value) == CONST_INT
8079 && INTVAL (bl->initial_value) > 0
8080 && (INTVAL (bl->initial_value)
8081 % (-INTVAL (bl->biv->add_val))) == 0)
8082 {
8083 /* Register always nonnegative, add REG_NONNEG note to branch. */
8084 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8085 REG_NOTES (jump)
8086 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8087 REG_NOTES (jump));
8088 bl->nonneg = 1;
8089
8090 return 1;
8091 }
8092
8093 /* If the decrement is 1 and the value was tested as >= 0 before
8094 the loop, then we can safely optimize. */
8095 for (p = loop_start; p; p = PREV_INSN (p))
8096 {
8097 if (GET_CODE (p) == CODE_LABEL)
8098 break;
8099 if (GET_CODE (p) != JUMP_INSN)
8100 continue;
8101
8102 before_comparison = get_condition_for_loop (loop, p);
8103 if (before_comparison
8104 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8105 && (GET_CODE (before_comparison) == LT
8106 || GET_CODE (before_comparison) == LTU)
8107 && XEXP (before_comparison, 1) == const0_rtx
8108 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8109 && INTVAL (bl->biv->add_val) == -1)
8110 {
8111 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8112 REG_NOTES (jump)
8113 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8114 REG_NOTES (jump));
8115 bl->nonneg = 1;
8116
8117 return 1;
8118 }
8119 }
8120 }
8121 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8122 && INTVAL (bl->biv->add_val) > 0)
8123 {
8124 /* Try to change inc to dec, so we can apply the above optimization. */
8125 /* Can do this if:
8126 all registers modified are induction variables or invariant,
8127 all memory references have non-overlapping addresses
8128 (obviously true if only one write),
8129 and we allow 2 insns for the compare/jump at the end of the loop. */
8130 /* Also, we must avoid any instructions which use both the reversed
8131 biv and another biv. Such instructions will fail if the loop is
8132 reversed. We meet this condition by requiring that either
8133 no_use_except_counting is true, or else that there is only
8134 one biv. */
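/* Shape of the transformation (illustrative; the exact form depends
   on the target's branch instructions):
       i = 0;  do { ... i += 1; } while (i < 16);
   becomes
       i = 16; do { ... i -= 1; } while (i != 0);
   so the exit test compares against zero and may match a
   decrement-and-branch instruction.  */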
8135 int num_nonfixed_reads = 0;
8136 /* 1 if the iteration var is used only to count iterations. */
8137 int no_use_except_counting = 0;
8138 /* 1 if the loop has no memory store, or it has a single memory store
8139 which is reversible. */
8140 int reversible_mem_store = 1;
8141
8142 if (bl->giv_count == 0
8143 && !loop->exit_count
8144 && !loop_info->has_multiple_exit_targets)
8145 {
8146 rtx bivreg = regno_reg_rtx[bl->regno];
8147 struct iv_class *blt;
8148
8149 /* If there are no givs for this biv, and the only exit is the
8150 fall through at the end of the loop, then
8151 see if perhaps there are no uses except to count. */
8152 no_use_except_counting = 1;
8153 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8154 if (INSN_P (p))
8155 {
8156 rtx set = single_set (p);
8157
8158 if (set && REG_P (SET_DEST (set))
8159 && REGNO (SET_DEST (set)) == bl->regno)
8160 /* An insn that sets the biv is okay. */
8161 ;
8162 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8163 /* An insn that doesn't mention the biv is okay. */
8164 ;
8165 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8166 || p == prev_nonnote_insn (loop_end))
8167 {
8168 /* If either of these insns uses the biv and sets a pseudo
8169 that has more than one usage, then the biv has uses
8170 other than counting since it's used to derive a value
8171 that is used more than one time. */
8172 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8173 regs);
8174 if (regs->multiple_uses)
8175 {
8176 no_use_except_counting = 0;
8177 break;
8178 }
8179 }
8180 else
8181 {
8182 no_use_except_counting = 0;
8183 break;
8184 }
8185 }
8186
8187 /* A biv has uses besides counting if it is used to set
8188 another biv. */
8189 for (blt = ivs->list; blt; blt = blt->next)
8190 if (blt->init_set
8191 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8192 {
8193 no_use_except_counting = 0;
8194 break;
8195 }
8196 }
8197
8198 if (no_use_except_counting)
8199 /* No need to worry about MEMs. */
8200 ;
8201 else if (loop_info->num_mem_sets <= 1)
8202 {
8203 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8204 if (INSN_P (p))
8205 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8206
8207 /* If the loop has a single store, and the destination address is
8208 invariant, then we can't reverse the loop, because this address
8209 might then have the wrong value at loop exit.
8210 This would work if the source were also invariant; however, in that
8211 case, the insn should have been moved out of the loop. */
8212
8213 if (loop_info->num_mem_sets == 1)
8214 {
8215 struct induction *v;
8216
8217 /* If we could prove that each of the memory locations
8218 written to was different, then we could reverse the
8219 store -- but we don't presently have any way of
8220 knowing that. */
8221 reversible_mem_store = 0;
8222
8223 /* If the store depends on a register that is set after the
8224 store, it depends on the initial value, and is thus not
8225 reversible. */
8226 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8227 {
8228 if (v->giv_type == DEST_REG
8229 && reg_mentioned_p (v->dest_reg,
8230 PATTERN (loop_info->first_loop_store_insn))
8231 && loop_insn_first_p (loop_info->first_loop_store_insn,
8232 v->insn))
8233 reversible_mem_store = 0;
8234 }
8235 }
8236 }
8237 else
8238 return 0;
8239
8240 /* This code only acts for innermost loops. Also it simplifies
8241 the memory address check by only reversing loops with
8242 zero or one memory access.
8243 Two memory accesses could involve parts of the same array,
8244 and that can't be reversed.
8245 If the biv is used only for counting, then we don't need to worry
8246 about all these things. */
8247
8248 if ((num_nonfixed_reads <= 1
8249 && ! loop_info->has_nonconst_call
8250 && ! loop_info->has_prefetch
8251 && ! loop_info->has_volatile
8252 && reversible_mem_store
8253 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8254 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8255 && (bl == ivs->list && bl->next == 0))
8256 || (no_use_except_counting && ! loop_info->has_prefetch))
8257 {
8258 rtx tem;
8259
8260 /* Loop can be reversed. */
8261 if (loop_dump_stream)
8262 fprintf (loop_dump_stream, "Can reverse loop\n");
8263
8264 /* Now check other conditions:
8265
8266 The increment must be a constant, as must the initial value,
8267 and the comparison code must be LT.
8268
8269 This test can probably be improved since +/- 1 in the constant
8270 can be obtained by changing LT to LE and vice versa; this is
8271 confusing. */
8272
8273 if (comparison
8274 /* For constants, LE gets turned into LT. */
8275 && (GET_CODE (comparison) == LT
8276 || (GET_CODE (comparison) == LE
8277 && no_use_except_counting)
8278 || GET_CODE (comparison) == LTU))
8279 {
8280 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8281 rtx initial_value, comparison_value;
8282 int nonneg = 0;
8283 enum rtx_code cmp_code;
8284 int comparison_const_width;
8285 unsigned HOST_WIDE_INT comparison_sign_mask;
8286
8287 add_val = INTVAL (bl->biv->add_val);
8288 comparison_value = XEXP (comparison, 1);
8289 if (GET_MODE (comparison_value) == VOIDmode)
8290 comparison_const_width
8291 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8292 else
8293 comparison_const_width
8294 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8295 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8296 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8297 comparison_sign_mask
8298 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8299
8300 /* If the comparison value is not a loop invariant, then we
8301 cannot reverse this loop.
8302
8303 ??? If the insns which initialize the comparison value as
8304 a whole compute an invariant result, then we could move
8305 them out of the loop and proceed with loop reversal. */
8306 if (! loop_invariant_p (loop, comparison_value))
8307 return 0;
8308
8309 if (GET_CODE (comparison_value) == CONST_INT)
8310 comparison_val = INTVAL (comparison_value);
8311 initial_value = bl->initial_value;
8312
8313 /* Normalize the initial value if it is an integer and
8314 has no other use except as a counter. This will allow
8315 a few more loops to be reversed. */
8316 if (no_use_except_counting
8317 && GET_CODE (comparison_value) == CONST_INT
8318 && GET_CODE (initial_value) == CONST_INT)
8319 {
8320 comparison_val = comparison_val - INTVAL (bl->initial_value);
8321 /* The code below requires comparison_val to be a multiple
8322 of add_val in order to do the loop reversal, so
8323 round up comparison_val to a multiple of add_val.
8324 Since comparison_value is constant, we know that the
8325 current comparison code is LT. */
8326 comparison_val = comparison_val + add_val - 1;
8327 comparison_val
8328 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8329 /* We postpone overflow checks for COMPARISON_VAL here;
8330 even if there is an overflow, we might still be able to
8331 reverse the loop, if converting the loop exit test to
8332 NE is possible. */
8333 initial_value = const0_rtx;
8334 }
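/* Worked normalization (illustrative): for i = 2; i < 13; i += 3
   we get comparison_val = 13 - 2 = 11, rounded up to the next
   multiple of add_val, 12, with initial_value 0 -- the reversed
   loop then runs 12 / 3 = 4 times, matching the original trip
   count (i = 2, 5, 8, 11).  */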
8335
8336 /* First check if we can do a vanilla loop reversal. */
8337 if (initial_value == const0_rtx
8338 /* If we have a decrement_and_branch_on_count,
8339 prefer the NE test, since this will allow that
8340 instruction to be generated. Note that we must
8341 use a vanilla loop reversal if the biv is used to
8342 calculate a giv or has a non-counting use. */
8343 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8344 && defined (HAVE_decrement_and_branch_on_count)
8345 && (! (add_val == 1 && loop->vtop
8346 && (bl->biv_count == 0
8347 || no_use_except_counting)))
8348 #endif
8349 && GET_CODE (comparison_value) == CONST_INT
8350 /* Now do postponed overflow checks on COMPARISON_VAL. */
8351 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8352 & comparison_sign_mask))
8353 {
8354 /* Register will always be nonnegative, with value
8355 0 on last iteration */
8356 add_adjust = add_val;
8357 nonneg = 1;
8358 cmp_code = GE;
8359 }
8360 else if (add_val == 1 && loop->vtop
8361 && (bl->biv_count == 0
8362 || no_use_except_counting))
8363 {
8364 add_adjust = 0;
8365 cmp_code = NE;
8366 }
8367 else
8368 return 0;
8369
8370 if (GET_CODE (comparison) == LE)
8371 add_adjust -= add_val;
8372
8373 /* If the initial value is not zero, or if the comparison
8374 value is not an exact multiple of the increment, then we
8375 cannot reverse this loop. */
8376 if (initial_value == const0_rtx
8377 && GET_CODE (comparison_value) == CONST_INT)
8378 {
8379 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8380 return 0;
8381 }
8382 else
8383 {
8384 if (! no_use_except_counting || add_val != 1)
8385 return 0;
8386 }
8387
8388 final_value = comparison_value;
8389
8390 /* Reset these in case we normalized the initial value
8391 and comparison value above. */
8392 if (GET_CODE (comparison_value) == CONST_INT
8393 && GET_CODE (initial_value) == CONST_INT)
8394 {
8395 comparison_value = GEN_INT (comparison_val);
8396 final_value
8397 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8398 }
8399 bl->initial_value = initial_value;
8400
8401 /* Save some info needed to produce the new insns. */
8402 reg = bl->biv->dest_reg;
8403 mode = GET_MODE (reg);
8404 jump_label = condjump_label (PREV_INSN (loop_end));
8405 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8406
8407 /* Set start_value; if this is not a CONST_INT, we need
8408 to generate a SUB.
8409 Initialize biv to start_value before loop start.
8410 The old initializing insn will be deleted as a
8411 dead store by flow.c. */
8412 if (initial_value == const0_rtx
8413 && GET_CODE (comparison_value) == CONST_INT)
8414 {
8415 start_value
8416 = gen_int_mode (comparison_val - add_adjust, mode);
8417 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8418 }
8419 else if (GET_CODE (initial_value) == CONST_INT)
8420 {
8421 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8422 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8423
8424 if (add_insn == 0)
8425 return 0;
8426
8427 start_value
8428 = gen_rtx_PLUS (mode, comparison_value, offset);
8429 loop_insn_hoist (loop, add_insn);
8430 if (GET_CODE (comparison) == LE)
8431 final_value = gen_rtx_PLUS (mode, comparison_value,
8432 GEN_INT (add_val));
8433 }
8434 else if (! add_adjust)
8435 {
8436 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8437 initial_value);
8438
8439 if (sub_insn == 0)
8440 return 0;
8441 start_value
8442 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8443 loop_insn_hoist (loop, sub_insn);
8444 }
8445 else
8446 /* We could handle the other cases too, but it'll be
8447 better to have a testcase first. */
8448 return 0;
8449
8450 /* We may not have a single insn which can increment a reg, so
8451 create a sequence to hold all the insns from expand_inc. */
8452 start_sequence ();
8453 expand_inc (reg, new_add_val);
8454 tem = get_insns ();
8455 end_sequence ();
8456
8457 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8458 delete_insn (bl->biv->insn);
8459
8460 /* Update biv info to reflect its new status. */
8461 bl->biv->insn = p;
8462 bl->initial_value = start_value;
8463 bl->biv->add_val = new_add_val;
8464
8465 /* Update loop info. */
8466 loop_info->initial_value = reg;
8467 loop_info->initial_equiv_value = reg;
8468 loop_info->final_value = const0_rtx;
8469 loop_info->final_equiv_value = const0_rtx;
8470 loop_info->comparison_value = const0_rtx;
8471 loop_info->comparison_code = cmp_code;
8472 loop_info->increment = new_add_val;
8473
8474 /* Inc LABEL_NUSES so that delete_insn will
8475 not delete the label. */
8476 LABEL_NUSES (XEXP (jump_label, 0))++;
8477
8478 /* Emit an insn after the end of the loop to set the biv's
8479 proper exit value if it is used anywhere outside the loop. */
8480 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8481 || ! bl->init_insn
8482 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8483 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8484
8485 /* Delete compare/branch at end of loop. */
8486 delete_related_insns (PREV_INSN (loop_end));
8487 if (compare_and_branch == 2)
8488 delete_related_insns (first_compare);
8489
8490 /* Add new compare/branch insn at end of loop. */
8491 start_sequence ();
8492 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8493 mode, 0,
8494 XEXP (jump_label, 0));
8495 tem = get_insns ();
8496 end_sequence ();
8497 emit_jump_insn_before (tem, loop_end);
8498
8499 for (tem = PREV_INSN (loop_end);
8500 tem && GET_CODE (tem) != JUMP_INSN;
8501 tem = PREV_INSN (tem))
8502 ;
8503
8504 if (tem)
8505 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8506
8507 if (nonneg)
8508 {
8509 if (tem)
8510 {
8511 /* Increment of LABEL_NUSES done above. */
8512 /* Register is now always nonnegative,
8513 so add REG_NONNEG note to the branch. */
8514 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8515 REG_NOTES (tem));
8516 }
8517 bl->nonneg = 1;
8518 }
8519
8520 /* No insn may reference both the reversed and another biv or it
8521 will fail (see comment near the top of the loop reversal
8522 code).
8523 Earlier on, we have verified that the biv has no use except
8524 counting, or it is the only biv in this function.
8525 However, the code that computes no_use_except_counting does
8526 not verify reg notes. It's possible to have an insn that
8527 references another biv, and has a REG_EQUAL note with an
8528 expression based on the reversed biv. To avoid this case,
8529 remove all REG_EQUAL notes based on the reversed biv
8530 here. */
8531 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8532 if (INSN_P (p))
8533 {
8534 rtx *pnote;
8535 rtx set = single_set (p);
8536 /* If this is a set of a GIV based on the reversed biv, any
8537 REG_EQUAL notes should still be correct. */
8538 if (! set
8539 || !REG_P (SET_DEST (set))
8540 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8541 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8542 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8543 for (pnote = &REG_NOTES (p); *pnote;)
8544 {
8545 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8546 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8547 XEXP (*pnote, 0)))
8548 *pnote = XEXP (*pnote, 1);
8549 else
8550 pnote = &XEXP (*pnote, 1);
8551 }
8552 }
8553
8554 /* Mark that this biv has been reversed. Each giv which depends
8555 on this biv, and which is also live past the end of the loop,
8556 will have to be fixed up. */
8557
8558 bl->reversed = 1;
8559
8560 if (loop_dump_stream)
8561 {
8562 fprintf (loop_dump_stream, "Reversed loop");
8563 if (bl->nonneg)
8564 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8565 else
8566 fprintf (loop_dump_stream, "\n");
8567 }
8568
8569 return 1;
8570 }
8571 }
8572 }
8573
8574 return 0;
8575 }
8576 \f
8577 /* Verify whether the biv BL appears to be eliminable,
8578 based on the insns in the loop that refer to it.
8579
8580 If ELIMINATE_P is nonzero, actually do the elimination.
8581
8582 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8583 determine whether invariant insns should be placed inside or at the
8584 start of the loop. */
8585
8586 static int
8587 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8588 int eliminate_p, int threshold, int insn_count)
8589 {
8590 struct loop_ivs *ivs = LOOP_IVS (loop);
8591 rtx reg = bl->biv->dest_reg;
8592 rtx p;
8593
8594 /* Scan all insns in the loop, stopping if we find one that uses the
8595 biv in a way that we cannot eliminate. */
8596
8597 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8598 {
8599 enum rtx_code code = GET_CODE (p);
8600 basic_block where_bb = 0;
8601 rtx where_insn = threshold >= insn_count ? 0 : p;
8602 rtx note;
8603
8604 /* If this is a libcall that sets a giv, skip ahead to its end. */
8605 if (INSN_P (p))
8606 {
8607 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8608
8609 if (note)
8610 {
8611 rtx last = XEXP (note, 0);
8612 rtx set = single_set (last);
8613
8614 if (set && REG_P (SET_DEST (set)))
8615 {
8616 unsigned int regno = REGNO (SET_DEST (set));
8617
8618 if (regno < ivs->n_regs
8619 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8620 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8621 p = last;
8622 }
8623 }
8624 }
8625
8626 /* Closely examine the insn if the biv is mentioned. */
8627 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8628 && reg_mentioned_p (reg, PATTERN (p))
8629 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8630 eliminate_p, where_bb, where_insn))
8631 {
8632 if (loop_dump_stream)
8633 fprintf (loop_dump_stream,
8634 "Cannot eliminate biv %d: biv used in insn %d.\n",
8635 bl->regno, INSN_UID (p));
8636 break;
8637 }
8638
8639 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8640 if (eliminate_p
8641 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8642 && reg_mentioned_p (reg, XEXP (note, 0)))
8643 remove_note (p, note);
8644 }
8645
8646 if (p == loop->end)
8647 {
8648 if (loop_dump_stream)
8649 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8650 bl->regno, eliminate_p ? "was" : "can be");
8651 return 1;
8652 }
8653
8654 return 0;
8655 }
8656 \f
8657 /* INSN and REFERENCE are instructions in the same insn chain.
8658 Return nonzero if INSN is first. */
8659
8660 int
8661 loop_insn_first_p (rtx insn, rtx reference)
8662 {
8663 rtx p, q;
8664
8665 for (p = insn, q = reference;;)
8666 {
8667 /* Start with test for not first so that INSN == REFERENCE yields not
8668 first. */
8669 if (q == insn || ! p)
8670 return 0;
8671 if (p == reference || ! q)
8672 return 1;
8673
8674 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8675 previous insn, hence the <= comparison below does not work if
8676 P is a note. */
8677 if (INSN_UID (p) < max_uid_for_loop
8678 && INSN_UID (q) < max_uid_for_loop
8679 && GET_CODE (p) != NOTE)
8680 return INSN_LUID (p) <= INSN_LUID (q);
8681
8682 if (INSN_UID (p) >= max_uid_for_loop
8683 || GET_CODE (p) == NOTE)
8684 p = NEXT_INSN (p);
8685 if (INSN_UID (q) >= max_uid_for_loop)
8686 q = NEXT_INSN (q);
8687 }
8688 }
8689
8690 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8691 the offset that we have to take into account due to auto-increment /
8692 giv derivation is zero. */
8693 static int
8694 biv_elimination_giv_has_0_offset (struct induction *biv,
8695 struct induction *giv, rtx insn)
8696 {
8697 /* If the giv V had the auto-inc address optimization applied
8698 to it, and INSN occurs between the giv insn and the biv
8699 insn, then we'd have to adjust the value used here.
8700 This is rare, so we don't bother to make this possible. */
8701 if (giv->auto_inc_opt
8702 && ((loop_insn_first_p (giv->insn, insn)
8703 && loop_insn_first_p (insn, biv->insn))
8704 || (loop_insn_first_p (biv->insn, insn)
8705 && loop_insn_first_p (insn, giv->insn))))
8706 return 0;
8707
8708 return 1;
8709 }
8710
8711 /* If BL appears in X (part of the pattern of INSN), see if we can
8712 eliminate its use. If so, return 1. If not, return 0.
8713
8714 If BIV does not appear in X, return 1.
8715
8716 If ELIMINATE_P is nonzero, actually do the elimination.
8717 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8718 Depending on how many items have been moved out of the loop, it
8719 will either be before INSN (when WHERE_INSN is nonzero) or at the
8720 start of the loop (when WHERE_INSN is zero). */
8721
8722 static int
8723 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8724 struct iv_class *bl, int eliminate_p,
8725 basic_block where_bb, rtx where_insn)
8726 {
8727 enum rtx_code code = GET_CODE (x);
8728 rtx reg = bl->biv->dest_reg;
8729 enum machine_mode mode = GET_MODE (reg);
8730 struct induction *v;
8731 rtx arg, tem;
8732 #ifdef HAVE_cc0
8733 rtx new;
8734 #endif
8735 int arg_operand;
8736 const char *fmt;
8737 int i, j;
8738
8739 switch (code)
8740 {
8741 case REG:
8742 /* If we haven't already been able to do something with this BIV,
8743 we can't eliminate it. */
8744 if (x == reg)
8745 return 0;
8746 return 1;
8747
8748 case SET:
8749 /* If this sets the BIV, it is not a problem. */
8750 if (SET_DEST (x) == reg)
8751 return 1;
8752
8753 /* If this is an insn that defines a giv, it is also ok because
8754 it will go away when the giv is reduced. */
8755 for (v = bl->giv; v; v = v->next_iv)
8756 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8757 return 1;
8758
8759 #ifdef HAVE_cc0
8760 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8761 {
8762 /* Can replace with any giv that was reduced and
8763 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8764 Require a constant for MULT_VAL, so we know it's nonzero.
8765 ??? We disable this optimization to avoid potential
8766 overflows. */
8767
8768 for (v = bl->giv; v; v = v->next_iv)
8769 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8770 && v->add_val == const0_rtx
8771 && ! v->ignore && ! v->maybe_dead && v->always_computable
8772 && v->mode == mode
8773 && 0)
8774 {
8775 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8776 continue;
8777
8778 if (! eliminate_p)
8779 return 1;
8780
8781 /* If the giv has the opposite direction of change,
8782 then reverse the comparison. */
8783 if (INTVAL (v->mult_val) < 0)
8784 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8785 const0_rtx, v->new_reg);
8786 else
8787 new = v->new_reg;
8788
8789 /* We can probably test using that giv's reduced register. */
8790 if (validate_change (insn, &SET_SRC (x), new, 0))
8791 return 1;
8792 }
8793
8794 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8795 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8796 Require a constant for MULT_VAL, so we know it's nonzero.
8797 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8798 overflow problem. */
8799
8800 for (v = bl->giv; v; v = v->next_iv)
8801 if (GET_CODE (v->mult_val) == CONST_INT
8802 && v->mult_val != const0_rtx
8803 && ! v->ignore && ! v->maybe_dead && v->always_computable
8804 && v->mode == mode
8805 && (GET_CODE (v->add_val) == SYMBOL_REF
8806 || GET_CODE (v->add_val) == LABEL_REF
8807 || GET_CODE (v->add_val) == CONST
8808 || (REG_P (v->add_val)
8809 && REG_POINTER (v->add_val))))
8810 {
8811 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8812 continue;
8813
8814 if (! eliminate_p)
8815 return 1;
8816
8817 /* If the giv has the opposite direction of change,
8818 then reverse the comparison. */
8819 if (INTVAL (v->mult_val) < 0)
8820 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8821 v->new_reg);
8822 else
8823 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8824 copy_rtx (v->add_val));
8825
8826 /* Replace biv with the giv's reduced register. */
8827 update_reg_last_use (v->add_val, insn);
8828 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8829 return 1;
8830
8831 /* Insn doesn't support that constant or invariant. Copy it
8832 into a register (it will be a loop invariant.) */
8833 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8834
8835 loop_insn_emit_before (loop, 0, where_insn,
8836 gen_move_insn (tem,
8837 copy_rtx (v->add_val)));
8838
8839 /* Substitute the new register for its invariant value in
8840 the compare expression. */
8841 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8842 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8843 return 1;
8844 }
8845 }
8846 #endif
8847 break;
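/* Editorial illustration of the rewrite above, using hypothetical
   registers: a biv test  (set (cc0) (reg 60))  for which a reduced giv
   (reg 90) = (reg 60) * 2 + (symbol_ref "a")  exists would become
   (set (cc0) (compare (reg 90) (symbol_ref "a"))),  i.e. the biv is
   replaced by the giv's reduced register compared against ADD_VAL.  */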
8848
8849 case COMPARE:
8850 case EQ: case NE:
8851 case GT: case GE: case GTU: case GEU:
8852 case LT: case LE: case LTU: case LEU:
8853 /* See if either argument is the biv. */
8854 if (XEXP (x, 0) == reg)
8855 arg = XEXP (x, 1), arg_operand = 1;
8856 else if (XEXP (x, 1) == reg)
8857 arg = XEXP (x, 0), arg_operand = 0;
8858 else
8859 break;
8860
8861 if (CONSTANT_P (arg))
8862 {
8863 /* First try to replace with any giv that has constant positive
8864 mult_val and constant add_val. We might be able to support
8865 negative mult_val, but it seems complex to do it in general. */
8866
8867 for (v = bl->giv; v; v = v->next_iv)
8868 if (GET_CODE (v->mult_val) == CONST_INT
8869 && INTVAL (v->mult_val) > 0
8870 && (GET_CODE (v->add_val) == SYMBOL_REF
8871 || GET_CODE (v->add_val) == LABEL_REF
8872 || GET_CODE (v->add_val) == CONST
8873 || (REG_P (v->add_val)
8874 && REG_POINTER (v->add_val)))
8875 && ! v->ignore && ! v->maybe_dead && v->always_computable
8876 && v->mode == mode)
8877 {
8878 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8879 continue;
8880
8881 /* Don't eliminate if the linear combination that makes up
8882 the giv overflows when it is applied to ARG. */
8883 if (GET_CODE (arg) == CONST_INT)
8884 {
8885 rtx add_val;
8886
8887 if (GET_CODE (v->add_val) == CONST_INT)
8888 add_val = v->add_val;
8889 else
8890 add_val = const0_rtx;
8891
8892 if (const_mult_add_overflow_p (arg, v->mult_val,
8893 add_val, mode, 1))
8894 continue;
8895 }
8896
8897 if (! eliminate_p)
8898 return 1;
8899
8900 /* Replace biv with the giv's reduced reg. */
8901 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8902
8903 /* If all constants are actually constant integers and
8904 the derived constant can be directly placed in the COMPARE,
8905 do so. */
8906 if (GET_CODE (arg) == CONST_INT
8907 && GET_CODE (v->add_val) == CONST_INT)
8908 {
8909 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8910 v->add_val, mode, 1);
8911 }
8912 else
8913 {
8914 /* Otherwise, load it into a register. */
8915 tem = gen_reg_rtx (mode);
8916 loop_iv_add_mult_emit_before (loop, arg,
8917 v->mult_val, v->add_val,
8918 tem, where_bb, where_insn);
8919 }
8920
8921 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8922
8923 if (apply_change_group ())
8924 return 1;
8925 }
8926
8927 /* Look for giv with positive constant mult_val and nonconst add_val.
8928 Insert insns to calculate new compare value.
8929 ??? Turn this off due to possible overflow. */
8930
8931 for (v = bl->giv; v; v = v->next_iv)
8932 if (GET_CODE (v->mult_val) == CONST_INT
8933 && INTVAL (v->mult_val) > 0
8934 && ! v->ignore && ! v->maybe_dead && v->always_computable
8935 && v->mode == mode
8936 && 0)
8937 {
8938 rtx tem;
8939
8940 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8941 continue;
8942
8943 if (! eliminate_p)
8944 return 1;
8945
8946 tem = gen_reg_rtx (mode);
8947
8948 /* Replace biv with giv's reduced register. */
8949 validate_change (insn, &XEXP (x, 1 - arg_operand),
8950 v->new_reg, 1);
8951
8952 /* Compute value to compare against. */
8953 loop_iv_add_mult_emit_before (loop, arg,
8954 v->mult_val, v->add_val,
8955 tem, where_bb, where_insn);
8956 /* Use it in this insn. */
8957 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8958 if (apply_change_group ())
8959 return 1;
8960 }
8961 }
8962 else if (REG_P (arg) || GET_CODE (arg) == MEM)
8963 {
8964 if (loop_invariant_p (loop, arg) == 1)
8965 {
8966 /* Look for giv with constant positive mult_val and nonconst
8967 add_val. Insert insns to compute new compare value.
8968 ??? Turn this off due to possible overflow. */
8969
8970 for (v = bl->giv; v; v = v->next_iv)
8971 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8972 && ! v->ignore && ! v->maybe_dead && v->always_computable
8973 && v->mode == mode
8974 && 0)
8975 {
8976 rtx tem;
8977
8978 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8979 continue;
8980
8981 if (! eliminate_p)
8982 return 1;
8983
8984 tem = gen_reg_rtx (mode);
8985
8986 /* Replace biv with giv's reduced register. */
8987 validate_change (insn, &XEXP (x, 1 - arg_operand),
8988 v->new_reg, 1);
8989
8990 /* Compute value to compare against. */
8991 loop_iv_add_mult_emit_before (loop, arg,
8992 v->mult_val, v->add_val,
8993 tem, where_bb, where_insn);
8994 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8995 if (apply_change_group ())
8996 return 1;
8997 }
8998 }
8999
9000 /* This code has problems. Basically, when deciding whether we
9001 will eliminate BL, we can't know whether a particular giv
9002 of ARG will be reduced. If it isn't going to be reduced,
9003 we can't eliminate BL. We could try forcing it to be reduced,
9004 but that can generate poor code.
9005
9006 The problem is that the benefit of reducing TV, below, should
9007 be increased if BL can actually be eliminated, but this means
9008 we might have to do a topological sort of the order in which
9009 we try to process bivs. It doesn't seem worthwhile to do
9010 this sort of thing now. */
9011
9012 #if 0
9013 /* Otherwise the reg compared with had better be a biv. */
9014 if (!REG_P (arg)
9015 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9016 return 0;
9017
9018 /* Look for a pair of givs, one for each biv,
9019 with identical coefficients. */
9020 for (v = bl->giv; v; v = v->next_iv)
9021 {
9022 struct induction *tv;
9023
9024 if (v->ignore || v->maybe_dead || v->mode != mode)
9025 continue;
9026
9027 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9028 tv = tv->next_iv)
9029 if (! tv->ignore && ! tv->maybe_dead
9030 && rtx_equal_p (tv->mult_val, v->mult_val)
9031 && rtx_equal_p (tv->add_val, v->add_val)
9032 && tv->mode == mode)
9033 {
9034 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9035 continue;
9036
9037 if (! eliminate_p)
9038 return 1;
9039
9040 /* Replace biv with its giv's reduced reg. */
9041 XEXP (x, 1 - arg_operand) = v->new_reg;
9042 /* Replace other operand with the other giv's
9043 reduced reg. */
9044 XEXP (x, arg_operand) = tv->new_reg;
9045 return 1;
9046 }
9047 }
9048 #endif
9049 }
9050
9051 /* If we get here, the biv can't be eliminated. */
9052 return 0;
9053
9054 case MEM:
9055 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9056 biv is used in it, since it will be replaced. */
9057 for (v = bl->giv; v; v = v->next_iv)
9058 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9059 return 1;
9060 break;
9061
9062 default:
9063 break;
9064 }
9065
9066 /* See if any subexpression fails elimination. */
9067 fmt = GET_RTX_FORMAT (code);
9068 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9069 {
9070 switch (fmt[i])
9071 {
9072 case 'e':
9073 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9074 eliminate_p, where_bb, where_insn))
9075 return 0;
9076 break;
9077
9078 case 'E':
9079 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9080 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9081 eliminate_p, where_bb, where_insn))
9082 return 0;
9083 break;
9084 }
9085 }
9086
9087 return 1;
9088 }
9089 \f
9090 /* Return nonzero if the last use of REG
9091 is in an insn following INSN in the same basic block. */
9092
9093 static int
9094 last_use_this_basic_block (rtx reg, rtx insn)
9095 {
9096 rtx n;
9097 for (n = insn;
9098 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9099 n = NEXT_INSN (n))
9100 {
9101 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9102 return 1;
9103 }
9104 return 0;
9105 }
9106 \f
9107 /* Called via `note_stores' to record the initial value of a biv. Here we
9108 just record the location of the set and process it later. */
9109
9110 static void
9111 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9112 {
9113 struct loop_ivs *ivs = (struct loop_ivs *) data;
9114 struct iv_class *bl;
9115
9116 if (!REG_P (dest)
9117 || REGNO (dest) >= ivs->n_regs
9118 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9119 return;
9120
9121 bl = REG_IV_CLASS (ivs, REGNO (dest));
9122
9123 /* If this is the first set found, record it. */
9124 if (bl->init_insn == 0)
9125 {
9126 bl->init_insn = note_insn;
9127 bl->init_set = set;
9128 }
9129 }
9130 \f
9131 /* If any of the registers in X are "old" and currently have a last use earlier
9132 than INSN, update them to have a last use of INSN. Their actual last use
9133 will be the previous insn but it will not have a valid uid_luid so we can't
9134 use it. X must be a source expression only. */
9135
9136 static void
9137 update_reg_last_use (rtx x, rtx insn)
9138 {
9139 /* Check for the case where INSN does not have a valid luid. In this case,
9140 there is no need to modify the regno_last_uid, as this can only happen
9141 when code is inserted after the loop_end to set a pseudo's final value,
9142 and hence this insn will never be the last use of x.
9143 ??? This comment is not correct. See for example loop_givs_reduce.
9144 This may insert an insn before another new insn. */
9145 if (REG_P (x) && REGNO (x) < max_reg_before_loop
9146 && INSN_UID (insn) < max_uid_for_loop
9147 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9148 {
9149 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9150 }
9151 else
9152 {
9153 int i, j;
9154 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9155 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9156 {
9157 if (fmt[i] == 'e')
9158 update_reg_last_use (XEXP (x, i), insn);
9159 else if (fmt[i] == 'E')
9160 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9161 update_reg_last_use (XVECEXP (x, i, j), insn);
9162 }
9163 }
9164 }
9165 \f
9166 /* Given an insn INSN and condition COND, return the condition in a
9167 canonical form to simplify testing by callers. Specifically:
9168
9169 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9170 (2) Both operands will be machine operands; (cc0) will have been replaced.
9171 (3) If an operand is a constant, it will be the second operand.
9172 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9173 for GE, GEU, and LEU.
9174
9175 If the condition cannot be understood, or is an inequality floating-point
9176 comparison which needs to be reversed, 0 will be returned.
9177
9178 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9179
9180 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9181 insn used in locating the condition was found. If a replacement test
9182 of the condition is desired, it should be placed in front of that
9183 insn and we will be sure that the inputs are still valid.
9184
9185 If WANT_REG is nonzero, we wish the condition to be relative to that
9186 register, if possible. Therefore, do not canonicalize the condition
9187 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
9188 to be a compare to a CC mode register. */
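/* For example (an editorial sketch): given the condition
   (le (reg 70) (const_int 4)) taken from a jump, rule (4) above yields
   (lt (reg 70) (const_int 5)), provided const_int 5 is representable in
   the mode of (reg 70).  */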
9189
9190 rtx
9191 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9192 rtx want_reg, int allow_cc_mode)
9193 {
9194 enum rtx_code code;
9195 rtx prev = insn;
9196 rtx set;
9197 rtx tem;
9198 rtx op0, op1;
9199 int reverse_code = 0;
9200 enum machine_mode mode;
9201
9202 code = GET_CODE (cond);
9203 mode = GET_MODE (cond);
9204 op0 = XEXP (cond, 0);
9205 op1 = XEXP (cond, 1);
9206
9207 if (reverse)
9208 code = reversed_comparison_code (cond, insn);
9209 if (code == UNKNOWN)
9210 return 0;
9211
9212 if (earliest)
9213 *earliest = insn;
9214
9215 /* If we are comparing a register with zero, see if the register is set
9216 in the previous insn to a COMPARE or a comparison operation. Perform
9217 the same tests, as a function of STORE_FLAG_VALUE, that find_comparison_args
9218 in cse.c performs. */
9219
9220 while ((GET_RTX_CLASS (code) == RTX_COMPARE
9221 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
9222 && op1 == CONST0_RTX (GET_MODE (op0))
9223 && op0 != want_reg)
9224 {
9225 /* Set nonzero when we find something of interest. */
9226 rtx x = 0;
9227
9228 #ifdef HAVE_cc0
9229 /* If comparison with cc0, import actual comparison from compare
9230 insn. */
9231 if (op0 == cc0_rtx)
9232 {
9233 if ((prev = prev_nonnote_insn (prev)) == 0
9234 || GET_CODE (prev) != INSN
9235 || (set = single_set (prev)) == 0
9236 || SET_DEST (set) != cc0_rtx)
9237 return 0;
9238
9239 op0 = SET_SRC (set);
9240 op1 = CONST0_RTX (GET_MODE (op0));
9241 if (earliest)
9242 *earliest = prev;
9243 }
9244 #endif
9245
9246 /* If this is a COMPARE, pick up the two things being compared. */
9247 if (GET_CODE (op0) == COMPARE)
9248 {
9249 op1 = XEXP (op0, 1);
9250 op0 = XEXP (op0, 0);
9251 continue;
9252 }
9253 else if (!REG_P (op0))
9254 break;
9255
9256 /* Go back to the previous insn. Stop if it is not an INSN. We also
9257 stop if it isn't a single set or if it has a REG_INC note because
9258 we don't want to bother dealing with it. */
9259
9260 if ((prev = prev_nonnote_insn (prev)) == 0
9261 || GET_CODE (prev) != INSN
9262 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9263 break;
9264
9265 set = set_of (op0, prev);
9266
9267 if (set
9268 && (GET_CODE (set) != SET
9269 || !rtx_equal_p (SET_DEST (set), op0)))
9270 break;
9271
9272 /* If this is setting OP0, get what it sets it to if it looks
9273 relevant. */
9274 if (set)
9275 {
9276 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9277 #ifdef FLOAT_STORE_FLAG_VALUE
9278 REAL_VALUE_TYPE fsfv;
9279 #endif
9280
9281 /* ??? We may not combine comparisons done in a CCmode with
9282 comparisons not done in a CCmode. This is to aid targets
9283 like Alpha that have an IEEE compliant EQ instruction, and
9284 a non-IEEE compliant BEQ instruction. The use of CCmode is
9285 actually artificial, simply to prevent the combination, but
9286 should not affect other platforms.
9287
9288 However, we must allow VOIDmode comparisons to match either
9289 CCmode or non-CCmode comparison, because some ports have
9290 modeless comparisons inside branch patterns.
9291
9292 ??? This mode check should perhaps look more like the mode check
9293 in simplify_comparison in combine. */
9294
9295 if ((GET_CODE (SET_SRC (set)) == COMPARE
9296 || (((code == NE
9297 || (code == LT
9298 && GET_MODE_CLASS (inner_mode) == MODE_INT
9299 && (GET_MODE_BITSIZE (inner_mode)
9300 <= HOST_BITS_PER_WIDE_INT)
9301 && (STORE_FLAG_VALUE
9302 & ((HOST_WIDE_INT) 1
9303 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9304 #ifdef FLOAT_STORE_FLAG_VALUE
9305 || (code == LT
9306 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9307 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9308 REAL_VALUE_NEGATIVE (fsfv)))
9309 #endif
9310 ))
9311 && COMPARISON_P (SET_SRC (set))))
9312 && (((GET_MODE_CLASS (mode) == MODE_CC)
9313 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9314 || mode == VOIDmode || inner_mode == VOIDmode))
9315 x = SET_SRC (set);
9316 else if (((code == EQ
9317 || (code == GE
9318 && (GET_MODE_BITSIZE (inner_mode)
9319 <= HOST_BITS_PER_WIDE_INT)
9320 && GET_MODE_CLASS (inner_mode) == MODE_INT
9321 && (STORE_FLAG_VALUE
9322 & ((HOST_WIDE_INT) 1
9323 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9324 #ifdef FLOAT_STORE_FLAG_VALUE
9325 || (code == GE
9326 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9327 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9328 REAL_VALUE_NEGATIVE (fsfv)))
9329 #endif
9330 ))
9331 && COMPARISON_P (SET_SRC (set))
9332 && (((GET_MODE_CLASS (mode) == MODE_CC)
9333 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9334 || mode == VOIDmode || inner_mode == VOIDmode))
9336 {
9337 reverse_code = 1;
9338 x = SET_SRC (set);
9339 }
9340 else
9341 break;
9342 }
9343
9344 else if (reg_set_p (op0, prev))
9345 /* If this sets OP0, but not directly, we have to give up. */
9346 break;
9347
9348 if (x)
9349 {
9350 if (COMPARISON_P (x))
9351 code = GET_CODE (x);
9352 if (reverse_code)
9353 {
9354 code = reversed_comparison_code (x, prev);
9355 if (code == UNKNOWN)
9356 return 0;
9357 reverse_code = 0;
9358 }
9359
9360 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9361 if (earliest)
9362 *earliest = prev;
9363 }
9364 }
9365
9366 /* If constant is first, put it last. */
9367 if (CONSTANT_P (op0))
9368 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9369
9370 /* If OP0 is the result of a comparison, we weren't able to find what
9371 was really being compared, so fail. */
9372 if (!allow_cc_mode
9373 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9374 return 0;
9375
9376 /* Canonicalize any ordered comparison with integers involving equality
9377 if we can do computations in the relevant mode and we do not
9378 overflow. */
9379
9380 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
9381 && GET_CODE (op1) == CONST_INT
9382 && GET_MODE (op0) != VOIDmode
9383 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9384 {
9385 HOST_WIDE_INT const_val = INTVAL (op1);
9386 unsigned HOST_WIDE_INT uconst_val = const_val;
9387 unsigned HOST_WIDE_INT max_val
9388 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9389
9390 switch (code)
9391 {
9392 case LE:
9393 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9394 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9395 break;
9396
9397 /* When cross-compiling, const_val might be sign-extended from
9398 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9399 case GE:
9400 if ((HOST_WIDE_INT) (const_val & max_val)
9401 != (((HOST_WIDE_INT) 1
9402 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9403 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9404 break;
9405
9406 case LEU:
9407 if (uconst_val < max_val)
9408 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9409 break;
9410
9411 case GEU:
9412 if (uconst_val != 0)
9413 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9414 break;
9415
9416 default:
9417 break;
9418 }
9419 }
9420
9421 /* Never return CC0; return zero instead. */
9422 if (CC0_P (op0))
9423 return 0;
9424
9425 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9426 }
9427
9428 /* Given a jump insn JUMP, return the condition that will cause it to branch
9429 to its JUMP_LABEL. If the condition cannot be understood, or is an
9430 inequality floating-point comparison which needs to be reversed, 0 will
9431 be returned.
9432
9433 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9434 insn used in locating the condition was found. If a replacement test
9435 of the condition is desired, it should be placed in front of that
9436 insn and we will be sure that the inputs are still valid.
9437
9438 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
9439 compare CC mode register. */
9440
9441 rtx
9442 get_condition (rtx jump, rtx *earliest, int allow_cc_mode)
9443 {
9444 rtx cond;
9445 int reverse;
9446 rtx set;
9447
9448 /* If this is not a standard conditional jump, we can't parse it. */
9449 if (GET_CODE (jump) != JUMP_INSN
9450 || ! any_condjump_p (jump))
9451 return 0;
9452 set = pc_set (jump);
9453
9454 cond = XEXP (SET_SRC (set), 0);
9455
9456 /* If this branches to JUMP_LABEL when the condition is false, reverse
9457 the condition. */
9458 reverse
9459 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9460 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9461
9462 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
9463 allow_cc_mode);
9464 }
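/* Usage sketch (editorial, hypothetical caller): one typically writes

     rtx earliest;
     rtx cond = get_condition (jump, &earliest, 0);

   and, when COND is nonzero, may emit a replacement test immediately
   before EARLIEST, where the operands of COND are known to be valid.  */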
9465
9466 /* Similar to above routine, except that we also put an invariant last
9467 unless both operands are invariants. */
9468
9469 rtx
9470 get_condition_for_loop (const struct loop *loop, rtx x)
9471 {
9472 rtx comparison = get_condition (x, (rtx*) 0, false);
9473
9474 if (comparison == 0
9475 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9476 || loop_invariant_p (loop, XEXP (comparison, 1)))
9477 return comparison;
9478
9479 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9480 XEXP (comparison, 1), XEXP (comparison, 0));
9481 }
9482
9483 /* Scan the function and determine whether it has indirect (computed) jumps.
9484
9485 This is taken mostly from flow.c; similar code exists elsewhere
9486 in the compiler. It may be useful to put this into rtlanal.c. */
9487 static int
9488 indirect_jump_in_function_p (rtx start)
9489 {
9490 rtx insn;
9491
9492 for (insn = start; insn; insn = NEXT_INSN (insn))
9493 if (computed_jump_p (insn))
9494 return 1;
9495
9496 return 0;
9497 }
9498
9499 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9500 documentation for LOOP_MEMS for the definition of `appropriate'.
9501 This function is called from prescan_loop via for_each_rtx. */
9502
9503 static int
9504 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9505 {
9506 struct loop_info *loop_info = data;
9507 int i;
9508 rtx m = *mem;
9509
9510 if (m == NULL_RTX)
9511 return 0;
9512
9513 switch (GET_CODE (m))
9514 {
9515 case MEM:
9516 break;
9517
9518 case CLOBBER:
9519 /* We're not interested in MEMs that are only clobbered. */
9520 return -1;
9521
9522 case CONST_DOUBLE:
9523 /* We're not interested in the MEM associated with a
9524 CONST_DOUBLE, so there's no need to traverse into this. */
9525 return -1;
9526
9527 case EXPR_LIST:
9528 /* We're not interested in any MEMs that only appear in notes. */
9529 return -1;
9530
9531 default:
9532 /* This is not a MEM. */
9533 return 0;
9534 }
9535
9536 /* See if we've already seen this MEM. */
9537 for (i = 0; i < loop_info->mems_idx; ++i)
9538 if (rtx_equal_p (m, loop_info->mems[i].mem))
9539 {
9540 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
9541 loop_info->mems[i].mem = m;
9542 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9543 /* The modes of the two memory accesses are different. If
9544 this happens, something tricky is going on, and we just
9545 don't optimize accesses to this MEM. */
9546 loop_info->mems[i].optimize = 0;
9547
9548 return 0;
9549 }
9550
9551 /* Resize the array, if necessary. */
9552 if (loop_info->mems_idx == loop_info->mems_allocated)
9553 {
9554 if (loop_info->mems_allocated != 0)
9555 loop_info->mems_allocated *= 2;
9556 else
9557 loop_info->mems_allocated = 32;
9558
9559 loop_info->mems = xrealloc (loop_info->mems,
9560 loop_info->mems_allocated * sizeof (loop_mem_info));
9561 }
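/* (Editorial note: the capacity grows 32, 64, 128, ..., so the total
   copying performed by xrealloc over N insertions stays O(N).)  */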
9562
9563 /* Actually insert the MEM. */
9564 loop_info->mems[loop_info->mems_idx].mem = m;
9565 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9566 because we can't put it in a register. We still store it in the
9567 table, though, so that if we see the same address later, but in a
9568 non-BLK mode, we'll not think we can optimize it at that point. */
9569 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9570 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9571 ++loop_info->mems_idx;
9572
9573 return 0;
9574 }
9575
9576
9577 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9578
9579 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9580 register that is modified by an insn between FROM and TO. If the
9581 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9582 more, stop incrementing it, to avoid overflow.
9583
9584 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9585 register I is used, if it is only used once. Otherwise, it is set
9586 to 0 (for no uses) or const0_rtx for more than one use. This
9587 parameter may be zero, in which case this processing is not done.
9588
9589 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9590 optimize register I. */
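/* For instance (editor's note): a register stored on every path through
   a large loop body may be set far more than 127 times; SET_IN_LOOP then
   sticks at 127 rather than wrapping, so the register is still treated
   as set many times.  */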
9591
9592 static void
9593 loop_regs_scan (const struct loop *loop, int extra_size)
9594 {
9595 struct loop_regs *regs = LOOP_REGS (loop);
9596 int old_nregs;
9597 /* last_set[n] is nonzero iff reg n has been set in the current
9598 basic block. In that case, it is the insn that last set reg n. */
9599 rtx *last_set;
9600 rtx insn;
9601 int i;
9602
9603 old_nregs = regs->num;
9604 regs->num = max_reg_num ();
9605
9606 /* Grow the regs array if not allocated or too small. */
9607 if (regs->num >= regs->size)
9608 {
9609 regs->size = regs->num + extra_size;
9610
9611 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
9612
9613 /* Zero the new elements. */
9614 memset (regs->array + old_nregs, 0,
9615 (regs->size - old_nregs) * sizeof (*regs->array));
9616 }
9617
9618 /* Clear previously scanned fields but do not clear n_times_set. */
9619 for (i = 0; i < old_nregs; i++)
9620 {
9621 regs->array[i].set_in_loop = 0;
9622 regs->array[i].may_not_optimize = 0;
9623 regs->array[i].single_usage = NULL_RTX;
9624 }
9625
9626 last_set = xcalloc (regs->num, sizeof (rtx));
9627
9628 /* Scan the loop, recording register usage. */
9629 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9630 insn = NEXT_INSN (insn))
9631 {
9632 if (INSN_P (insn))
9633 {
9634 /* Record registers that have exactly one use. */
9635 find_single_use_in_loop (regs, insn, PATTERN (insn));
9636
9637 /* Include uses in REG_EQUAL notes. */
9638 if (REG_NOTES (insn))
9639 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9640
9641 if (GET_CODE (PATTERN (insn)) == SET
9642 || GET_CODE (PATTERN (insn)) == CLOBBER)
9643 count_one_set (regs, insn, PATTERN (insn), last_set);
9644 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9645 {
9646 int i;
9647 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9648 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9649 last_set);
9650 }
9651 }
9652
9653 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9654 memset (last_set, 0, regs->num * sizeof (rtx));
9655
9656 /* Invalidate all registers used for function argument passing.
9657 We check rtx_varies_p for the same reason as below, to allow
9658 optimizing PIC calculations. */
9659 if (GET_CODE (insn) == CALL_INSN)
9660 {
9661 rtx link;
9662 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9663 link;
9664 link = XEXP (link, 1))
9665 {
9666 rtx op, reg;
9667
9668 if (GET_CODE (op = XEXP (link, 0)) == USE
9669 && REG_P (reg = XEXP (op, 0))
9670 && rtx_varies_p (reg, 1))
9671 regs->array[REGNO (reg)].may_not_optimize = 1;
9672 }
9673 }
9674 }
9675
9676 /* Invalidate all hard registers clobbered by calls. With one exception:
9677 a call-clobbered PIC register is still function-invariant for our
9678 purposes, since we can hoist any PIC calculations out of the loop.
9679 Thus the call to rtx_varies_p. */
9680 if (LOOP_INFO (loop)->has_call)
9681 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9682 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9683 && rtx_varies_p (regno_reg_rtx[i], 1))
9684 {
9685 regs->array[i].may_not_optimize = 1;
9686 regs->array[i].set_in_loop = 1;
9687 }
9688
9689 #ifdef AVOID_CCMODE_COPIES
9690 /* Don't try to move insns which set CC registers if we should not
9691 create CCmode register copies. */
9692 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9693 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9694 regs->array[i].may_not_optimize = 1;
9695 #endif
9696
9697 /* Set regs->array[I].n_times_set for the new registers. */
9698 for (i = old_nregs; i < regs->num; i++)
9699 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9700
9701 free (last_set);
9702 }
9703
9704 /* Returns the number of real INSNs in the LOOP. */
9705
9706 static int
9707 count_insns_in_loop (const struct loop *loop)
9708 {
9709 int count = 0;
9710 rtx insn;
9711
9712 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9713 insn = NEXT_INSN (insn))
9714 if (INSN_P (insn))
9715 ++count;
9716
9717 return count;
9718 }
9719
9720 /* Move MEMs into registers for the duration of the loop. */
9721
9722 static void
9723 load_mems (const struct loop *loop)
9724 {
9725 struct loop_info *loop_info = LOOP_INFO (loop);
9726 struct loop_regs *regs = LOOP_REGS (loop);
9727 int maybe_never = 0;
9728 int i;
9729 rtx p, prev_ebb_head;
9730 rtx label = NULL_RTX;
9731 rtx end_label;
9732 /* Nonzero if the next instruction may never be executed. */
9733 int next_maybe_never = 0;
9734 unsigned int last_max_reg = max_reg_num ();
9735
9736 if (loop_info->mems_idx == 0)
9737 return;
9738
9739 /* We cannot use next_label here because it skips over normal insns. */
9740 end_label = next_nonnote_insn (loop->end);
9741 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9742 end_label = NULL_RTX;
9743
9744 /* Check to see if it's possible that some instructions in the loop are
9745 never executed. Also check if there is a goto out of the loop other
9746 than right after the end of the loop. */
9747 for (p = next_insn_in_loop (loop, loop->scan_start);
9748 p != NULL_RTX;
9749 p = next_insn_in_loop (loop, p))
9750 {
9751 if (GET_CODE (p) == CODE_LABEL)
9752 maybe_never = 1;
9753 else if (GET_CODE (p) == JUMP_INSN
9754 /* If we enter the loop in the middle, and scan
9755 around to the beginning, don't set maybe_never
9756 for that. This must be an unconditional jump,
9757 otherwise the code at the top of the loop might
9758 never be executed. Unconditional jumps are
9759 followed by a barrier and then the loop end. */
9760 && ! (GET_CODE (p) == JUMP_INSN
9761 && JUMP_LABEL (p) == loop->top
9762 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9763 && any_uncondjump_p (p)))
9764 {
9765 /* If this is a jump outside of the loop but not right
9766 after the end of the loop, we would have to emit new fixup
9767 sequences for each such label. */
9768 if (/* If we can't tell where control might go when this
9769 JUMP_INSN is executed, we must be conservative. */
9770 !JUMP_LABEL (p)
9771 || (JUMP_LABEL (p) != end_label
9772 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9773 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9774 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9775 return;
9776
9777 if (!any_condjump_p (p))
9778 /* Something complicated. */
9779 maybe_never = 1;
9780 else
9781 /* If there are any more instructions in the loop, they
9782 might not be reached. */
9783 next_maybe_never = 1;
9784 }
9785 else if (next_maybe_never)
9786 maybe_never = 1;
9787 }
9788
9789 /* Find start of the extended basic block that enters the loop. */
9790 for (p = loop->start;
9791 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9792 p = PREV_INSN (p))
9793 ;
9794 prev_ebb_head = p;
9795
9796 cselib_init (true);
9797
9798 /* Build table of mems that get set to constant values before the
9799 loop. */
9800 for (; p != loop->start; p = NEXT_INSN (p))
9801 cselib_process_insn (p);
9802
9803 /* Actually move the MEMs. */
9804 for (i = 0; i < loop_info->mems_idx; ++i)
9805 {
9806 regset_head load_copies;
9807 regset_head store_copies;
9808 int written = 0;
9809 rtx reg;
9810 rtx mem = loop_info->mems[i].mem;
9811 rtx mem_list_entry;
9812
9813 if (MEM_VOLATILE_P (mem)
9814 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9815 /* There's no telling whether or not MEM is modified. */
9816 loop_info->mems[i].optimize = 0;
9817
9818 /* Go through the MEMs written to in the loop to see if this
9819 one is aliased by one of them. */
9820 mem_list_entry = loop_info->store_mems;
9821 while (mem_list_entry)
9822 {
9823 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9824 written = 1;
9825 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9826 mem, rtx_varies_p))
9827 {
9828 /* MEM is indeed aliased by this store. */
9829 loop_info->mems[i].optimize = 0;
9830 break;
9831 }
9832 mem_list_entry = XEXP (mem_list_entry, 1);
9833 }
9834
9835 if (flag_float_store && written
9836 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9837 loop_info->mems[i].optimize = 0;
9838
9839 /* If this MEM is written to, we must be sure that there
9840 are no reads from another MEM that aliases this one. */
9841 if (loop_info->mems[i].optimize && written)
9842 {
9843 int j;
9844
9845 for (j = 0; j < loop_info->mems_idx; ++j)
9846 {
9847 if (j == i)
9848 continue;
9849 else if (true_dependence (mem,
9850 VOIDmode,
9851 loop_info->mems[j].mem,
9852 rtx_varies_p))
9853 {
9854 /* It's not safe to hoist loop_info->mems[i] out of
9855 the loop because writes to it might not be
9856 seen by reads from loop_info->mems[j]. */
9857 loop_info->mems[i].optimize = 0;
9858 break;
9859 }
9860 }
9861 }
9862
9863 if (maybe_never && may_trap_p (mem))
9864 /* We can't access the MEM outside the loop; it might
9865 cause a trap that wouldn't have happened otherwise. */
9866 loop_info->mems[i].optimize = 0;
9867
9868 if (!loop_info->mems[i].optimize)
9869 /* We thought we were going to lift this MEM out of the
9870 loop, but later discovered that we could not. */
9871 continue;
9872
9873 INIT_REG_SET (&load_copies);
9874 INIT_REG_SET (&store_copies);
9875
9876 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9877 order to keep scan_loop from moving stores to this MEM
9878 out of the loop just because this REG is neither a
9879 user-variable nor used in the loop test. */
9880 reg = gen_reg_rtx (GET_MODE (mem));
9881 REG_USERVAR_P (reg) = 1;
9882 loop_info->mems[i].reg = reg;
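/* Overall shape of the transformation (editorial sketch, simplified):

     loop:  ... = M;  M = ...;

   becomes

     R = M;             (hoisted into the pre-header below)
     loop:  ... = R;  R = ...;
     M = R;             (emitted after the loop end when WRITTEN)

   where R is the shadow pseudo just allocated.  */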
9883
9884 /* Now, replace all references to the MEM with the
9885 corresponding pseudos. */
9886 maybe_never = 0;
9887 for (p = next_insn_in_loop (loop, loop->scan_start);
9888 p != NULL_RTX;
9889 p = next_insn_in_loop (loop, p))
9890 {
9891 if (INSN_P (p))
9892 {
9893 rtx set;
9894
9895 set = single_set (p);
9896
9897 /* See if this copies the mem into a register that isn't
9898 modified afterwards. We'll try to do copy propagation
9899 a little further on. */
9900 if (set
9901 /* @@@ This test is _way_ too conservative. */
9902 && ! maybe_never
9903 && REG_P (SET_DEST (set))
9904 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9905 && REGNO (SET_DEST (set)) < last_max_reg
9906 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9907 && rtx_equal_p (SET_SRC (set), mem))
9908 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9909
9910 /* See if this copies the mem from a register that isn't
9911 modified afterwards. We'll try to remove the
9912 redundant copy later on by doing a little register
9913 renaming and copy propagation. This will help
9914 to untangle things for the BIV detection code. */
9915 if (set
9916 && ! maybe_never
9917 && REG_P (SET_SRC (set))
9918 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9919 && REGNO (SET_SRC (set)) < last_max_reg
9920 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9921 && rtx_equal_p (SET_DEST (set), mem))
9922 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9923
9924 /* If this is a call which uses / clobbers this memory
9925 location, we must not change the interface here. */
9926 if (GET_CODE (p) == CALL_INSN
9927 && reg_mentioned_p (loop_info->mems[i].mem,
9928 CALL_INSN_FUNCTION_USAGE (p)))
9929 {
9930 cancel_changes (0);
9931 loop_info->mems[i].optimize = 0;
9932 break;
9933 }
9934 else
9935 /* Replace the memory reference with the shadow register. */
9936 replace_loop_mems (p, loop_info->mems[i].mem,
9937 loop_info->mems[i].reg, written);
9938 }
9939
9940 if (GET_CODE (p) == CODE_LABEL
9941 || GET_CODE (p) == JUMP_INSN)
9942 maybe_never = 1;
9943 }
9944
9945 if (! loop_info->mems[i].optimize)
9946 ; /* We found we couldn't do the replacement, so do nothing. */
9947 else if (! apply_change_group ())
9948 /* We couldn't replace all occurrences of the MEM. */
9949 loop_info->mems[i].optimize = 0;
9950 else
9951 {
9952 /* Load the memory immediately before LOOP->START, which is
9953 the NOTE_LOOP_BEG. */
9954 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9955 rtx set;
9956 rtx best = mem;
9957 int j;
9958 struct elt_loc_list *const_equiv = 0;
9959
9960 if (e)
9961 {
9962 struct elt_loc_list *equiv;
9963 struct elt_loc_list *best_equiv = 0;
9964 for (equiv = e->locs; equiv; equiv = equiv->next)
9965 {
9966 if (CONSTANT_P (equiv->loc))
9967 const_equiv = equiv;
9968 else if (REG_P (equiv->loc)
9969 /* Extending hard register lifetimes causes a crash
9970 on SRC targets. Doing so on non-SRC targets is
9971 probably also not a good idea, since we most
9972 probably have a pseudoregister equivalence as
9973 well. */
9974 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9975 best_equiv = equiv;
9976 }
9977 /* Use the constant equivalence if that is cheap enough. */
9978 if (! best_equiv)
9979 best_equiv = const_equiv;
9980 else if (const_equiv
9981 && (rtx_cost (const_equiv->loc, SET)
9982 <= rtx_cost (best_equiv->loc, SET)))
9983 {
9984 best_equiv = const_equiv;
9985 const_equiv = 0;
9986 }
9987
9988 /* If best_equiv is nonzero, we know that MEM is set to a
9989 constant or register before the loop. We will use this
9990 knowledge to initialize the shadow register with that
9991 constant or reg rather than by loading from MEM. */
9992 if (best_equiv)
9993 best = copy_rtx (best_equiv->loc);
9994 }
9995
9996 set = gen_move_insn (reg, best);
9997 set = loop_insn_hoist (loop, set);
9998 if (REG_P (best))
9999 {
10000 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10001 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10002 {
10003 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10004 break;
10005 }
10006 }
10007
10008 if (const_equiv)
10009 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10010
10011 if (written)
10012 {
10013 if (label == NULL_RTX)
10014 {
10015 label = gen_label_rtx ();
10016 emit_label_after (label, loop->end);
10017 }
10018
10019 /* Store the memory immediately after END, which is
10020 the NOTE_LOOP_END. */
10021 set = gen_move_insn (copy_rtx (mem), reg);
10022 loop_insn_emit_after (loop, 0, label, set);
10023 }
10024
10025 if (loop_dump_stream)
10026 {
10027 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10028 REGNO (reg), (written ? "r/w" : "r/o"));
10029 print_rtl (loop_dump_stream, mem);
10030 fputc ('\n', loop_dump_stream);
10031 }
10032
10033 /* Attempt a bit of copy propagation. This helps untangle the
10034 data flow, and enables {basic,general}_induction_var to find
10035 more bivs/givs. */
10036 EXECUTE_IF_SET_IN_REG_SET
10037 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10038 {
10039 try_copy_prop (loop, reg, j);
10040 });
10041 CLEAR_REG_SET (&load_copies);
10042
10043 EXECUTE_IF_SET_IN_REG_SET
10044 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10045 {
10046 try_swap_copy_prop (loop, reg, j);
10047 });
10048 CLEAR_REG_SET (&store_copies);
10049 }
10050 }
10051
10052 /* Now, we need to replace all references to the previous exit
10053 label with the new one. */
10054 if (label != NULL_RTX && end_label != NULL_RTX)
10055 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10056 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10057 redirect_jump (p, label, false);
10058
10059 cselib_finish ();
10060 }
10061
10062 /* For communication between note_reg_stored and its caller. */
10063 struct note_reg_stored_arg
10064 {
10065 int set_seen;
10066 rtx reg;
10067 };
10068
10069 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10070 is equal to ARG. */
10071 static void
10072 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
10073 {
10074 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10075 if (t->reg == x)
10076 t->set_seen = 1;
10077 }
10078
10079 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10080 There must be exactly one insn that sets this pseudo; it will be
10081 deleted if all replacements succeed and we can prove that the register
10082 is not used after the loop. */
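/* For example (editor's illustration, hypothetical pseudos): if the loop
   contains

     (set (reg 100) (reg 90))     <- the unique initializing insn
     ... (uses of (reg 100)) ...

   then each later use of (reg 100) in the same extended basic block is
   rewritten to use REPLACEMENT, and when the first and last references
   to reg 100 are covered, the initializing insn (or the libcall sequence
   it terminates) is deleted.  */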
10083
10084 static void
10085 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10086 {
10087 /* This is the reg that we are copying from. */
10088 rtx reg_rtx = regno_reg_rtx[regno];
10089 rtx init_insn = 0;
10090 rtx insn;
10091 /* These help keep track of whether we replaced all uses of the reg. */
10092 int replaced_last = 0;
10093 int store_is_first = 0;
10094
10095 for (insn = next_insn_in_loop (loop, loop->scan_start);
10096 insn != NULL_RTX;
10097 insn = next_insn_in_loop (loop, insn))
10098 {
10099 rtx set;
10100
10101 /* Only substitute within one extended basic block from the initializing
10102 insn. */
10103 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10104 break;
10105
10106 if (! INSN_P (insn))
10107 continue;
10108
10109 /* Is this the initializing insn? */
10110 set = single_set (insn);
10111 if (set
10112 && REG_P (SET_DEST (set))
10113 && REGNO (SET_DEST (set)) == regno)
10114 {
10115 if (init_insn)
10116 abort ();
10117
10118 init_insn = insn;
10119 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10120 store_is_first = 1;
10121 }
10122
10123 /* Only substitute after seeing the initializing insn. */
10124 if (init_insn && insn != init_insn)
10125 {
10126 struct note_reg_stored_arg arg;
10127
10128 replace_loop_regs (insn, reg_rtx, replacement);
10129 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10130 replaced_last = 1;
10131
10132 /* Stop replacing when REPLACEMENT is modified. */
10133 arg.reg = replacement;
10134 arg.set_seen = 0;
10135 note_stores (PATTERN (insn), note_reg_stored, &arg);
10136 if (arg.set_seen)
10137 {
10138 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10139
10140 /* It is possible that we've turned a previously valid REG_EQUAL note
10141 into an invalid one: we changed REGNO to REPLACEMENT, and unlike
10142 REGNO, REPLACEMENT is modified here, so the note's meaning changes. */
10143 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10144 remove_note (insn, note);
10145 break;
10146 }
10147 }
10148 }
10149 if (! init_insn)
10150 abort ();
10151 if (apply_change_group ())
10152 {
10153 if (loop_dump_stream)
10154 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10155 if (store_is_first && replaced_last)
10156 {
10157 rtx first;
10158 rtx retval_note;
10159
10160 /* Assume we're just deleting INIT_INSN. */
10161 first = init_insn;
10162 /* Look for REG_RETVAL note. If we're deleting the end of
10163 the libcall sequence, the whole sequence can go. */
10164 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10165 /* If we found a REG_RETVAL note, find the first instruction
10166 in the sequence. */
10167 if (retval_note)
10168 first = XEXP (retval_note, 0);
10169
10170 /* Delete the instructions. */
10171 loop_delete_insns (first, init_insn);
10172 }
10173 if (loop_dump_stream)
10174 fprintf (loop_dump_stream, ".\n");
10175 }
10176 }
10177
10178 /* Replace all the instructions from FIRST up to and including LAST
10179 with NOTE_INSN_DELETED notes. */
10180
10181 static void
10182 loop_delete_insns (rtx first, rtx last)
10183 {
10184 while (1)
10185 {
10186 if (loop_dump_stream)
10187 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10188 INSN_UID (first));
10189 delete_insn (first);
10190
10191 /* If this was the LAST instruction we're supposed to delete,
10192 we're done. */
10193 if (first == last)
10194 break;
10195
10196 first = NEXT_INSN (first);
10197 }
10198 }
10199
10200 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10201 loop LOOP if the order of the sets of these registers can be
10202 swapped. There must be exactly one insn within the loop that sets
10203 this pseudo, followed immediately by a move insn that copies
10204 REGNO into REPLACEMENT. */
10205 static void
10206 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10207 unsigned int regno)
10208 {
10209 rtx insn;
10210 rtx set = NULL_RTX;
10211 unsigned int new_regno;
10212
10213 new_regno = REGNO (replacement);
10214
10215 for (insn = next_insn_in_loop (loop, loop->scan_start);
10216 insn != NULL_RTX;
10217 insn = next_insn_in_loop (loop, insn))
10218 {
10219 /* Search for the insn that copies REGNO to NEW_REGNO. */
10220 if (INSN_P (insn)
10221 && (set = single_set (insn))
10222 && REG_P (SET_DEST (set))
10223 && REGNO (SET_DEST (set)) == new_regno
10224 && REG_P (SET_SRC (set))
10225 && REGNO (SET_SRC (set)) == regno)
10226 break;
10227 }
10228
10229 if (insn != NULL_RTX)
10230 {
10231 rtx prev_insn;
10232 rtx prev_set;
10233
10234 /* Some DEF-USE info would come in handy here to make this
10235 function more general. For now, just check the previous insn
10236 which is the most likely candidate for setting REGNO. */
10237
10238 prev_insn = PREV_INSN (insn);
10239
10240 if (INSN_P (prev_insn)
10241 && (prev_set = single_set (prev_insn))
10242 && REG_P (SET_DEST (prev_set))
10243 && REGNO (SET_DEST (prev_set)) == regno)
10244 {
10245 /* We have:
10246 (set (reg regno) (expr))
10247 (set (reg new_regno) (reg regno))
10248
10249 so try converting this to:
10250 (set (reg new_regno) (expr))
10251 (set (reg regno) (reg new_regno))
10252
10253 The former construct is often generated when a global
10254 variable used for an induction variable is shadowed by a
10255 register (NEW_REGNO). The latter construct improves the
10256 chances of GIV replacement and BIV elimination. */
10257
10258 validate_change (prev_insn, &SET_DEST (prev_set),
10259 replacement, 1);
10260 validate_change (insn, &SET_DEST (set),
10261 SET_SRC (set), 1);
10262 validate_change (insn, &SET_SRC (set),
10263 replacement, 1);
10264
10265 if (apply_change_group ())
10266 {
10267 if (loop_dump_stream)
10268 fprintf (loop_dump_stream,
10269 " Swapped set of reg %d at %d with reg %d at %d.\n",
10270 regno, INSN_UID (insn),
10271 new_regno, INSN_UID (prev_insn));
10272
10273 /* Update first use of REGNO. */
10274 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10275 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10276
10277 /* Now perform copy propagation to hopefully
10278 remove all uses of REGNO within the loop. */
10279 try_copy_prop (loop, replacement, regno);
10280 }
10281 }
10282 }
10283 }
10284
10285 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10286
10287 static int
10288 find_mem_in_note_1 (rtx *x, void *data)
10289 {
10290 if (*x != NULL_RTX && GET_CODE (*x) == MEM)
10291 {
10292 rtx *res = (rtx *) data;
10293 *res = *x;
10294 return 1;
10295 }
10296 return 0;
10297 }
10298
10299 /* Returns the first MEM found in NOTE by depth-first search. */
10300
10301 static rtx
10302 find_mem_in_note (rtx note)
10303 {
10304 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10305 return note;
10306 return NULL_RTX;
10307 }
10308
10309 /* Replace MEM with its associated pseudo register. This function is
10310 called from load_mems via for_each_rtx. DATA is actually a pointer
10311 to a structure describing the instruction currently being scanned
10312 and the MEM we are currently replacing. */
10313
10314 static int
10315 replace_loop_mem (rtx *mem, void *data)
10316 {
10317 loop_replace_args *args = (loop_replace_args *) data;
10318 rtx m = *mem;
10319
10320 if (m == NULL_RTX)
10321 return 0;
10322
10323 switch (GET_CODE (m))
10324 {
10325 case MEM:
10326 break;
10327
10328 case CONST_DOUBLE:
10329 /* We're not interested in the MEM associated with a
10330 CONST_DOUBLE, so there's no need to traverse into one. */
10331 return -1;
10332
10333 default:
10334 /* This is not a MEM. */
10335 return 0;
10336 }
10337
10338 if (!rtx_equal_p (args->match, m))
10339 /* This is not the MEM we are currently replacing. */
10340 return 0;
10341
10342 /* Actually replace the MEM. */
10343 validate_change (args->insn, mem, args->replacement, 1);
10344
10345 return 0;
10346 }
10347
10348 static void
10349 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10350 {
10351 loop_replace_args args;
10352
10353 args.insn = insn;
10354 args.match = mem;
10355 args.replacement = reg;
10356
10357 for_each_rtx (&insn, replace_loop_mem, &args);
10358
10359 /* If we hoist a mem write out of the loop, then REG_EQUAL
10360 notes referring to the mem are no longer valid. */
10361 if (written)
10362 {
10363 rtx note, sub;
10364 rtx *link;
10365
10366 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10367 {
10368 if (REG_NOTE_KIND (note) == REG_EQUAL
10369 && (sub = find_mem_in_note (note))
10370 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10371 {
10372 /* Remove the note. */
10373 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10374 break;
10375 }
10376 }
10377 }
10378 }
10379
10380 /* Replace one register with another. Called through for_each_rtx; PX points
10381 to the rtx being scanned. DATA is actually a pointer to
10382 a structure of arguments. */
10383
10384 static int
10385 replace_loop_reg (rtx *px, void *data)
10386 {
10387 rtx x = *px;
10388 loop_replace_args *args = (loop_replace_args *) data;
10389
10390 if (x == NULL_RTX)
10391 return 0;
10392
10393 if (x == args->match)
10394 validate_change (args->insn, px, args->replacement, 1);
10395
10396 return 0;
10397 }
10398
10399 static void
10400 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10401 {
10402 loop_replace_args args;
10403
10404 args.insn = insn;
10405 args.match = reg;
10406 args.replacement = replacement;
10407
10408 for_each_rtx (&insn, replace_loop_reg, &args);
10409 }
10410 \f
10411 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10412 (ignored in the interim). */
10413
10414 static rtx
10415 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10416 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10417 rtx pattern)
10418 {
10419 return emit_insn_after (pattern, where_insn);
10420 }
10421
10422
10423 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10424 in basic block WHERE_BB (ignored in the interim) within the loop
10425 otherwise hoist PATTERN into the loop pre-header. */
10426
10427 rtx
10428 loop_insn_emit_before (const struct loop *loop,
10429 basic_block where_bb ATTRIBUTE_UNUSED,
10430 rtx where_insn, rtx pattern)
10431 {
10432 if (! where_insn)
10433 return loop_insn_hoist (loop, pattern);
10434 return emit_insn_before (pattern, where_insn);
10435 }
10436
10437
10438 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10439 WHERE_BB (ignored in the interim) within the loop. */
10440
10441 static rtx
10442 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10443 basic_block where_bb ATTRIBUTE_UNUSED,
10444 rtx where_insn, rtx pattern)
10445 {
10446 return emit_call_insn_before (pattern, where_insn);
10447 }
10448
10449
10450 /* Hoist insn for PATTERN into the loop pre-header. */
10451
10452 rtx
10453 loop_insn_hoist (const struct loop *loop, rtx pattern)
10454 {
10455 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10456 }
10457
10458
10459 /* Hoist call insn for PATTERN into the loop pre-header. */
10460
10461 static rtx
10462 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10463 {
10464 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10465 }
10466
10467
10468 /* Sink insn for PATTERN after the loop end. */
10469
10470 rtx
10471 loop_insn_sink (const struct loop *loop, rtx pattern)
10472 {
10473 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10474 }
10475
10476 /* bl->final_value can be either general_operand or PLUS of general_operand
10477 and constant. Emit sequence of instructions to load it into REG. */
10478 static rtx
10479 gen_load_of_final_value (rtx reg, rtx final_value)
10480 {
10481 rtx seq;
10482 start_sequence ();
10483 final_value = force_operand (final_value, reg);
10484 if (final_value != reg)
10485 emit_move_insn (reg, final_value);
10486 seq = get_insns ();
10487 end_sequence ();
10488 return seq;
10489 }
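/* For example (editorial): with FINAL_VALUE = (plus (reg 80) (const_int 4)),
   force_operand computes reg 80 + 4, and the result is copied into REG
   if force_operand did not already place it there.  */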
10490
10491 /* If the loop has multiple exits, emit insn for PATTERN before the
10492 loop to ensure that it will always be executed no matter how the
10493 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10494 since this is slightly more efficient. */
10495
10496 static rtx
10497 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
10498 {
10499 if (loop->exit_count)
10500 return loop_insn_hoist (loop, pattern);
10501 else
10502 return loop_insn_sink (loop, pattern);
10503 }
10504 \f
10505 static void
10506 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
10507 {
10508 struct iv_class *bl;
10509 int iv_num = 0;
10510
10511 if (! loop || ! file)
10512 return;
10513
10514 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10515 iv_num++;
10516
10517 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10518
10519 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10520 {
10521 loop_iv_class_dump (bl, file, verbose);
10522 fputc ('\n', file);
10523 }
10524 }
10525
10526
10527 static void
10528 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10529 int verbose ATTRIBUTE_UNUSED)
10530 {
10531 struct induction *v;
10532 rtx incr;
10533 int i;
10534
10535 if (! bl || ! file)
10536 return;
10537
10538 fprintf (file, "IV class for reg %d, benefit %d\n",
10539 bl->regno, bl->total_benefit);
10540
10541 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10542 if (bl->initial_value)
10543 {
10544 fprintf (file, ", init val: ");
10545 print_simple_rtl (file, bl->initial_value);
10546 }
10547 if (bl->initial_test)
10548 {
10549 fprintf (file, ", init test: ");
10550 print_simple_rtl (file, bl->initial_test);
10551 }
10552 fputc ('\n', file);
10553
10554 if (bl->final_value)
10555 {
10556 fprintf (file, " Final val: ");
10557 print_simple_rtl (file, bl->final_value);
10558 fputc ('\n', file);
10559 }
10560
10561 if ((incr = biv_total_increment (bl)))
10562 {
10563 fprintf (file, " Total increment: ");
10564 print_simple_rtl (file, incr);
10565 fputc ('\n', file);
10566 }
10567
10568 /* List the increments. */
10569 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10570 {
10571 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10572 print_simple_rtl (file, v->add_val);
10573 fputc ('\n', file);
10574 }
10575
10576 /* List the givs. */
10577 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10578 {
10579 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10580 i, INSN_UID (v->insn), v->benefit);
10581 if (v->giv_type == DEST_ADDR)
10582 print_simple_rtl (file, v->mem);
10583 else
10584 print_simple_rtl (file, single_set (v->insn));
10585 fputc ('\n', file);
10586 }
10587 }
10588
10589
static void
loop_biv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  fprintf (file,
           "Biv %d: insn %d",
           REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}


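/* Dump giv V to FILE: its kind (DEST_REG or DEST_ADDR), source
   register, benefit, lifetime, flags, multiplicative and additive
   values, and, if VERBOSE, its final value.  */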
static void
loop_giv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
             REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
             INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
           REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
           v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    {
      switch (GET_CODE (v->ext_dependent))
        {
        case SIGN_EXTEND:
          fprintf (file, " ext se");
          break;
        case ZERO_EXTEND:
          fprintf (file, " ext ze");
          break;
        case TRUNCATE:
          fprintf (file, " ext tr");
          break;
        default:
          abort ();
        }
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}


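/* Call this function from the debugger to dump the induction variable
   information for LOOP.  */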
void
debug_ivs (const struct loop *loop)
{
  loop_ivs_dump (loop, stderr, 1);
}


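/* Call this function from the debugger to dump IV class BL.  */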
void
debug_iv_class (const struct iv_class *bl)
{
  loop_iv_class_dump (bl, stderr, 1);
}


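/* Call this function from the debugger to dump biv V.  */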
void
debug_biv (const struct induction *v)
{
  loop_biv_dump (v, stderr, 1);
}


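/* Call this function from the debugger to dump giv V.  */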
void
debug_giv (const struct induction *v)
{
  loop_giv_dump (v, stderr, 1);
}


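/* Return the number of the basic block containing INSN, or -1 if INSN
   is null or has no block assigned.  */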
#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (GET_CODE (INSN) == NOTE \
           ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
           : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)

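/* Dump hook passed to flow_loop_dump: report disagreements between
   our view of LOOP and the loop notes, then print the insn positions
   and exit labels recorded in the loop descriptor.  */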
static void
loop_dump_aux (const struct loop *loop, FILE *file,
               int verbose ATTRIBUTE_UNUSED)
{
  rtx label;

  if (! loop || ! file)
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (BB_HEAD (loop->first))
      || GET_CODE (PREV_INSN (BB_HEAD (loop->first))) != NOTE
      || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
         != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
             INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
  if (! NEXT_INSN (BB_END (loop->last))
      || GET_CODE (NEXT_INSN (BB_END (loop->last))) != NOTE
      || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
         != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
             INSN_UID (NEXT_INSN (BB_END (loop->last))));

  if (loop->start)
    {
      fprintf (file,
               ";; start %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
               LOOP_BLOCK_NUM (loop->start),
               LOOP_INSN_UID (loop->start),
               LOOP_BLOCK_NUM (loop->cont),
               LOOP_INSN_UID (loop->cont),
               LOOP_BLOCK_NUM (loop->vtop),
               LOOP_INSN_UID (loop->vtop),
               LOOP_BLOCK_NUM (loop->end),
               LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
               LOOP_BLOCK_NUM (loop->top),
               LOOP_INSN_UID (loop->top),
               LOOP_BLOCK_NUM (loop->scan_start),
               LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
        {
          fputs (", labels:", file);
          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            {
              fprintf (file, " %d ",
                       LOOP_INSN_UID (XEXP (label, 0)));
            }
        }
      fputs ("\n", file);

      /* This can happen when a marked loop appears as two nested loops,
         say from while (a || b) {}.  The inner loop won't match
         the loop markers but the outer one will.  */
      if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
        fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
    }
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (const struct loop *loop)
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (const struct loops *loops)
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}
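
/* For example, when stopped inside the loop optimizer under gdb (the
   variable name depends on the enclosing frame), one might use

     (gdb) call debug_loop (loop)

   to print the above information for a single loop to stderr.  */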