* loop.c (loop_givs_rescan): Delete the REG_EQUAL note, not the insn.
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
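/* As an illustrative sketch (not taken from the sources or from any
   particular test case) of the transformations described above, a loop such
   as

	for (i = 0; i < n; i++)
	  a[i] = x + y;

   is rewritten roughly as

	t = x + y;                          (the invariant x + y is hoisted)
	p = &a[0];
	for (i = 0; i < n; i++, p++)
	  *p = t;                           (the address giv &a[i] is
	                                     strength-reduced to the pointer p,
	                                     bumped once per iteration)
   All names here are illustrative only.  */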
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
58
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
71
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimum number of prefetch blocks that a loop must consume to make
83 emitting prefetch instructions in the body of the loop worthwhile. */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
85
86 /* Parameterize some prefetch heuristics so they can be turned on and off
87 easily for performance testing on new architectures. These can be
88 defined in target-dependent files. */
89
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
94
95 /* Define what we mean by "dense" loads and stores; this value divided by 256
96 is the minimum fraction of memory references that are worth prefetching. */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
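/* With the default value of 220 above, that minimum works out to 220/256,
   i.e. roughly 86%. */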
100
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
105
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
110
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
116
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
121
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
126
127 /* Define a limit to how far apart indices can be and still be merged
128 into a single prefetch. */
129 #ifndef PREFETCH_EXTREME_DIFFERENCE
130 #define PREFETCH_EXTREME_DIFFERENCE 4096
131 #endif
132
133 /* Issue prefetch instructions before the loop to fetch data to be used
134 in the first few loop iterations. */
135 #ifndef PREFETCH_BEFORE_LOOP
136 #define PREFETCH_BEFORE_LOOP 1
137 #endif
138
139 /* Do not handle reversed order prefetches (negative stride). */
140 #ifndef PREFETCH_NO_REVERSE_ORDER
141 #define PREFETCH_NO_REVERSE_ORDER 1
142 #endif
143
144 /* Prefetch even if the GIV is in conditional code. */
145 #ifndef PREFETCH_CONDITIONAL
146 #define PREFETCH_CONDITIONAL 1
147 #endif
148
149 /* If the loop requires more prefetches than the target can process in
150 parallel then don't prefetch anything in that loop. */
151 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
152 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
153 #endif
154
155 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
156 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
157
158 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
159 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
160 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
161
162 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
163 ((REGNO) < FIRST_PSEUDO_REGISTER \
164 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
165
166
167 /* Vector mapping INSN_UIDs to luids.
168 The luids are like uids but always increase monotonically.
169 We use them to see whether a jump comes from outside a given loop. */
170
171 int *uid_luid;
172
173 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
174 number the insn is contained in. */
175
176 struct loop **uid_loop;
177
178 /* 1 + largest uid of any insn. */
179
180 int max_uid_for_loop;
181
182 /* 1 + luid of last insn. */
183
184 static int max_luid;
185
186 /* Number of loops detected in current function. Used as index to the
187 next few tables. */
188
189 static int max_loop_num;
190
191 /* Bound on pseudo register number before loop optimization.
192 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
193 unsigned int max_reg_before_loop;
194
195 /* The value to pass to the next call of reg_scan_update. */
196 static int loop_max_reg;
197
198 #define obstack_chunk_alloc xmalloc
199 #define obstack_chunk_free free
200 \f
201 /* During the analysis of a loop, a chain of `struct movable's
202 is made to record all the movable insns found.
203 Then the entire chain can be scanned to decide which to move. */
204
205 struct movable
206 {
207 rtx insn; /* A movable insn */
208 rtx set_src; /* The expression this reg is set from. */
209 rtx set_dest; /* The destination of this SET. */
210 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
211 of any registers used within the LIBCALL. */
212 int consec; /* Number of consecutive following insns
213 that must be moved with this one. */
214 unsigned int regno; /* The register it sets */
215 short lifetime; /* lifetime of that register;
216 may be adjusted when matching movables
217 that load the same value are found. */
218 short savings; /* Number of insns we can move for this reg,
219 including other movables that force this
220 or match this one. */
221 unsigned int cond : 1; /* 1 if only conditionally movable */
222 unsigned int force : 1; /* 1 means MUST move this insn */
223 unsigned int global : 1; /* 1 means reg is live outside this loop */
224 /* If PARTIAL is 1, GLOBAL means something different:
225 that the reg is live outside the range from where it is set
226 to the following label. */
227 unsigned int done : 1; /* 1 inhibits further processing of this */
228
229 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
230 In particular, moving it does not make it
231 invariant. */
232 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
233 load SRC, rather than copying INSN. */
234 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
235 first insn of a consecutive sets group. */
236 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
237 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
238 that we should avoid changing when clearing
239 the rest of the reg. */
240 struct movable *match; /* First entry for same value */
241 struct movable *forces; /* An insn that must be moved if this is */
242 struct movable *next;
243 };
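/* The chain of movables is built by scan_loop (via loop_movables_add) and is
   then post-processed in order by ignore_some_movables, force_movables and
   combine_movables before move_movables finally decides which entries are
   worth moving out of the loop; see scan_loop below. */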
244
245
246 FILE *loop_dump_stream;
247
248 /* Forward declarations. */
249
250 static void invalidate_loops_containing_label PARAMS ((rtx));
251 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
252 static void mark_loop_jump PARAMS ((rtx, struct loop *));
253 static void prescan_loop PARAMS ((struct loop *));
254 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
255 static int consec_sets_invariant_p PARAMS ((const struct loop *,
256 rtx, int, rtx));
257 static int labels_in_range_p PARAMS ((rtx, int));
258 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
259 static void note_addr_stored PARAMS ((rtx, rtx, void *));
260 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
261 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
262 static void scan_loop PARAMS ((struct loop*, int));
263 #if 0
264 static void replace_call_address PARAMS ((rtx, rtx, rtx));
265 #endif
266 static rtx skip_consec_insns PARAMS ((rtx, int));
267 static int libcall_benefit PARAMS ((rtx));
268 static void ignore_some_movables PARAMS ((struct loop_movables *));
269 static void force_movables PARAMS ((struct loop_movables *));
270 static void combine_movables PARAMS ((struct loop_movables *,
271 struct loop_regs *));
272 static int num_unmoved_movables PARAMS ((const struct loop *));
273 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
274 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
275 struct loop_regs *));
276 static void add_label_notes PARAMS ((rtx, rtx));
277 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
278 int, int));
279 static void loop_movables_add PARAMS((struct loop_movables *,
280 struct movable *));
281 static void loop_movables_free PARAMS((struct loop_movables *));
282 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
283 static void loop_bivs_find PARAMS((struct loop *));
284 static void loop_bivs_init_find PARAMS((struct loop *));
285 static void loop_bivs_check PARAMS((struct loop *));
286 static void loop_givs_find PARAMS((struct loop *));
287 static void loop_givs_check PARAMS((struct loop *));
288 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
289 int, int));
290 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
291 struct induction *, rtx));
292 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
293 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
294 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
295 rtx *));
296 static void loop_ivs_free PARAMS((struct loop *));
297 static void strength_reduce PARAMS ((struct loop *, int));
298 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
299 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
300 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
301 static void record_biv PARAMS ((struct loop *, struct induction *,
302 rtx, rtx, rtx, rtx, rtx *,
303 int, int));
304 static void check_final_value PARAMS ((const struct loop *,
305 struct induction *));
306 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
307 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
308 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
309 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
310 static void record_giv PARAMS ((const struct loop *, struct induction *,
311 rtx, rtx, rtx, rtx, rtx, rtx, int,
312 enum g_types, int, int, rtx *));
313 static void update_giv_derive PARAMS ((const struct loop *, rtx));
314 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
315 struct loop_info *));
316 static int basic_induction_var PARAMS ((const struct loop *, rtx,
317 enum machine_mode, rtx, rtx,
318 rtx *, rtx *, rtx **));
319 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
320 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
321 rtx *, rtx *, rtx *, int, int *,
322 enum machine_mode));
323 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
324 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
325 static int check_dbra_loop PARAMS ((struct loop *, int));
326 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
327 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
328 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
329 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
330 static int product_cheap_p PARAMS ((rtx, rtx));
331 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
332 int, int, int));
333 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
334 struct iv_class *, int,
335 basic_block, rtx));
336 static int last_use_this_basic_block PARAMS ((rtx, rtx));
337 static void record_initial PARAMS ((rtx, rtx, void *));
338 static void update_reg_last_use PARAMS ((rtx, rtx));
339 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
340 static void loop_regs_scan PARAMS ((const struct loop *, int));
341 static int count_insns_in_loop PARAMS ((const struct loop *));
342 static void load_mems PARAMS ((const struct loop *));
343 static int insert_loop_mem PARAMS ((rtx *, void *));
344 static int replace_loop_mem PARAMS ((rtx *, void *));
345 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
346 static int replace_loop_reg PARAMS ((rtx *, void *));
347 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
348 static void note_reg_stored PARAMS ((rtx, rtx, void *));
349 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
350 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
351 unsigned int));
352 static int replace_label PARAMS ((rtx *, void *));
353 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
354 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
355 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
356 static void loop_regs_update PARAMS ((const struct loop *, rtx));
357 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
358
359 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
360 rtx, rtx));
361 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
362 basic_block, rtx, rtx));
363 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
364 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
365
366 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
367 static void loop_delete_insns PARAMS ((rtx, rtx));
368 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
369 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
370 void debug_ivs PARAMS ((const struct loop *));
371 void debug_iv_class PARAMS ((const struct iv_class *));
372 void debug_biv PARAMS ((const struct induction *));
373 void debug_giv PARAMS ((const struct induction *));
374 void debug_loop PARAMS ((const struct loop *));
375 void debug_loops PARAMS ((const struct loops *));
376
377 typedef struct rtx_pair
378 {
379 rtx r1;
380 rtx r2;
381 } rtx_pair;
382
383 typedef struct loop_replace_args
384 {
385 rtx match;
386 rtx replacement;
387 rtx insn;
388 } loop_replace_args;
389
390 /* Nonzero iff INSN is between START and END, inclusive. */
391 #define INSN_IN_RANGE_P(INSN, START, END) \
392 (INSN_UID (INSN) < max_uid_for_loop \
393 && INSN_LUID (INSN) >= INSN_LUID (START) \
394 && INSN_LUID (INSN) <= INSN_LUID (END))
395
396 /* Indirect_jump_in_function is computed once per function. */
397 static int indirect_jump_in_function;
398 static int indirect_jump_in_function_p PARAMS ((rtx));
399
400 static int compute_luids PARAMS ((rtx, rtx, int));
401
402 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
403 struct induction *,
404 rtx));
405 \f
406 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
407 copy the value of the strength reduced giv to its original register. */
408 static int copy_cost;
409
410 /* Cost of using a register, to normalize the benefits of a giv. */
411 static int reg_address_cost;
412
413 void
414 init_loop ()
415 {
416 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
417
418 reg_address_cost = address_cost (reg, SImode);
419
420 copy_cost = COSTS_N_INSNS (1);
421 }
422 \f
423 /* Compute the mapping from uids to luids.
424 LUIDs are numbers assigned to insns, like uids,
425 except that luids increase monotonically through the code.
426 Start at insn START and stop just before END. Assign LUIDs
427 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
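/* For example (hypothetical insn stream), starting with PREV_LUID == 0:
      insn A            -> luid 1
      line-number NOTE  -> luid 1  (same as the preceding insn)
      insn B            -> luid 2
   and the function returns 3. */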
428 static int
429 compute_luids (start, end, prev_luid)
430 rtx start, end;
431 int prev_luid;
432 {
433 int i;
434 rtx insn;
435
436 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
437 {
438 if (INSN_UID (insn) >= max_uid_for_loop)
439 continue;
440 /* Don't assign luids to line-number NOTEs, so that the distance in
441 luids between two insns is not affected by -g. */
442 if (GET_CODE (insn) != NOTE
443 || NOTE_LINE_NUMBER (insn) <= 0)
444 uid_luid[INSN_UID (insn)] = ++i;
445 else
446 /* Give a line-number note the same luid as the preceding insn. */
447 uid_luid[INSN_UID (insn)] = i;
448 }
449 return i + 1;
450 }
451 \f
452 /* Entry point of this file. Perform loop optimization
453 on the current function. F is the first insn of the function
454 and DUMPFILE is a stream for output of a trace of actions taken
455 (or 0 if none should be output). */
456
457 void
458 loop_optimize (f, dumpfile, flags)
459 /* f is the first instruction of a chain of insns for one function */
460 rtx f;
461 FILE *dumpfile;
462 int flags;
463 {
464 rtx insn;
465 int i;
466 struct loops loops_data;
467 struct loops *loops = &loops_data;
468 struct loop_info *loops_info;
469
470 loop_dump_stream = dumpfile;
471
472 init_recog_no_volatile ();
473
474 max_reg_before_loop = max_reg_num ();
475 loop_max_reg = max_reg_before_loop;
476
477 regs_may_share = 0;
478
479 /* Count the number of loops. */
480
481 max_loop_num = 0;
482 for (insn = f; insn; insn = NEXT_INSN (insn))
483 {
484 if (GET_CODE (insn) == NOTE
485 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
486 max_loop_num++;
487 }
488
489 /* Don't waste time if no loops. */
490 if (max_loop_num == 0)
491 return;
492
493 loops->num = max_loop_num;
494
495 /* Get size to use for tables indexed by uids.
496 Leave some space for labels allocated by find_and_verify_loops. */
497 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
498
499 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
500 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
501 sizeof (struct loop *));
502
503 /* Allocate storage for array of loops. */
504 loops->array = (struct loop *)
505 xcalloc (loops->num, sizeof (struct loop));
506
507 /* Find and process each loop.
508 First, find them, and record them in order of their beginnings. */
509 find_and_verify_loops (f, loops);
510
511 /* Allocate and initialize auxiliary loop information. */
512 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
513 for (i = 0; i < loops->num; i++)
514 loops->array[i].aux = loops_info + i;
515
516 /* Now find all register lifetimes. This must be done after
517 find_and_verify_loops, because it might reorder the insns in the
518 function. */
519 reg_scan (f, max_reg_before_loop, 1);
520
521 /* This must occur after reg_scan so that registers created by gcse
522 will have entries in the register tables.
523
524 We could have added a call to reg_scan after gcse_main in toplev.c,
525 but moving this call to init_alias_analysis is more efficient. */
526 init_alias_analysis ();
527
528 /* See if we went too far. Note that get_max_uid already returns
529 one more than the maximum uid of all insns. */
530 if (get_max_uid () > max_uid_for_loop)
531 abort ();
532 /* Now reset it to the actual size we need. See above. */
533 max_uid_for_loop = get_max_uid ();
534
535 /* find_and_verify_loops has already called compute_luids, but it
536 might have rearranged code afterwards, so we need to recompute
537 the luids now. */
538 max_luid = compute_luids (f, NULL_RTX, 0);
539
540 /* Don't leave gaps in uid_luid for insns that have been
541 deleted. It is possible that the first or last insn
542 using some register has been deleted by cross-jumping.
543 Make sure that uid_luid for that former insn's uid
544 points to the general area where that insn used to be. */
545 for (i = 0; i < max_uid_for_loop; i++)
546 {
547 uid_luid[0] = uid_luid[i];
548 if (uid_luid[0] != 0)
549 break;
550 }
551 for (i = 0; i < max_uid_for_loop; i++)
552 if (uid_luid[i] == 0)
553 uid_luid[i] = uid_luid[i - 1];
554
555 /* Determine if the function has an indirect jump. On some systems
556 this prevents low overhead loop instructions from being used. */
557 indirect_jump_in_function = indirect_jump_in_function_p (f);
558
559 /* Now scan the loops, last ones first, since this means inner ones are done
560 before outer ones. */
561 for (i = max_loop_num - 1; i >= 0; i--)
562 {
563 struct loop *loop = &loops->array[i];
564
565 if (! loop->invalid && loop->end)
566 scan_loop (loop, flags);
567 }
568
569 end_alias_analysis ();
570
571 /* Clean up. */
572 free (uid_luid);
573 free (uid_loop);
574 free (loops_info);
575 free (loops->array);
576 }
577 \f
578 /* Returns the next insn, in execution order, after INSN. START and
579 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
580 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
581 insn-stream; it is used with loops that are entered near the
582 bottom. */
583
584 static rtx
585 next_insn_in_loop (loop, insn)
586 const struct loop *loop;
587 rtx insn;
588 {
589 insn = NEXT_INSN (insn);
590
591 if (insn == loop->end)
592 {
593 if (loop->top)
594 /* Go to the top of the loop, and continue there. */
595 insn = loop->top;
596 else
597 /* We're done. */
598 insn = NULL_RTX;
599 }
600
601 if (insn == loop->scan_start)
602 /* We're done. */
603 insn = NULL_RTX;
604
605 return insn;
606 }
607
608 /* Optimize one loop described by LOOP. */
609
610 /* ??? Could also move memory writes out of loops if the destination address
611 is invariant, the source is invariant, the memory write is not volatile,
612 and if we can prove that no read inside the loop can read this address
613 before the write occurs. If there is a read of this address after the
614 write, then we can also mark the memory read as invariant. */
615
616 static void
617 scan_loop (loop, flags)
618 struct loop *loop;
619 int flags;
620 {
621 struct loop_info *loop_info = LOOP_INFO (loop);
622 struct loop_regs *regs = LOOP_REGS (loop);
623 int i;
624 rtx loop_start = loop->start;
625 rtx loop_end = loop->end;
626 rtx p;
627 /* 1 if we are scanning insns that could be executed zero times. */
628 int maybe_never = 0;
629 /* 1 if we are scanning insns that might never be executed
630 due to a subroutine call which might exit before they are reached. */
631 int call_passed = 0;
632 /* Jump insn that enters the loop, or 0 if control drops in. */
633 rtx loop_entry_jump = 0;
634 /* Number of insns in the loop. */
635 int insn_count;
636 int tem;
637 rtx temp, update_start, update_end;
638 /* The SET from an insn, if it is the only SET in the insn. */
639 rtx set, set1;
640 /* Chain describing insns movable in current loop. */
641 struct loop_movables *movables = LOOP_MOVABLES (loop);
642 /* Ratio of extra register life span we can justify
643 for saving an instruction. More if loop doesn't call subroutines
644 since in that case saving an insn makes more difference
645 and more registers are available. */
646 int threshold;
647 /* Nonzero if we are scanning instructions in a sub-loop. */
648 int loop_depth = 0;
649
650 loop->top = 0;
651
652 movables->head = 0;
653 movables->last = 0;
654
655 /* Determine whether this loop starts with a jump down to a test at
656 the end. This will occur for a small number of loops with a test
657 that is too complex to duplicate in front of the loop.
658
659 We search for the first insn or label in the loop, skipping NOTEs.
660 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
661 (because we might have a loop executed only once that contains a
662 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
663 (in case we have a degenerate loop).
664
665 Note that if we mistakenly think that a loop is entered at the top
666 when, in fact, it is entered at the exit test, the only effect will be
667 slightly poorer optimization. Making the opposite error can generate
668 incorrect code. Since very few loops now start with a jump to the
669 exit test, the code here to detect that case is very conservative. */
670
671 for (p = NEXT_INSN (loop_start);
672 p != loop_end
673 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
674 && (GET_CODE (p) != NOTE
675 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
676 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
677 p = NEXT_INSN (p))
678 ;
679
680 loop->scan_start = p;
681
682 /* If loop end is the end of the current function, then emit a
683 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
684 note insn. This is the position we use when sinking insns out of
685 the loop. */
686 if (NEXT_INSN (loop->end) != 0)
687 loop->sink = NEXT_INSN (loop->end);
688 else
689 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
690
691 /* Set up variables describing this loop. */
692 prescan_loop (loop);
693 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
694
695 /* If loop has a jump before the first label,
696 the true entry is the target of that jump.
697 Start scan from there.
698 But record in LOOP->TOP the place where the end-test jumps
699 back to so we can scan that after the end of the loop. */
700 if (GET_CODE (p) == JUMP_INSN)
701 {
702 loop_entry_jump = p;
703
704 /* Loop entry must be an unconditional jump (and not a RETURN). */
705 if (any_uncondjump_p (p)
706 && JUMP_LABEL (p) != 0
707 /* Check to see whether the jump actually
708 jumps out of the loop (meaning it's no loop).
709 This case can happen for things like
710 do {..} while (0). If this label was generated previously
711 by loop, we can't tell anything about it and have to reject
712 the loop. */
713 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
714 {
715 loop->top = next_label (loop->scan_start);
716 loop->scan_start = JUMP_LABEL (p);
717 }
718 }
719
720 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
721 as required by loop_reg_used_before_p. So skip such loops. (This
722 test may never be true, but it's best to play it safe.)
723
724 Also, skip loops where we do not start scanning at a label. This
725 test also rejects loops starting with a JUMP_INSN that failed the
726 test above. */
727
728 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
729 || GET_CODE (loop->scan_start) != CODE_LABEL)
730 {
731 if (loop_dump_stream)
732 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
733 INSN_UID (loop_start), INSN_UID (loop_end));
734 return;
735 }
736
737 /* Allocate extra space for REGs that might be created by load_mems.
738 We allocate a little extra slop as well, in the hopes that we
739 won't have to reallocate the regs array. */
740 loop_regs_scan (loop, loop_info->mems_idx + 16);
741 insn_count = count_insns_in_loop (loop);
742
743 if (loop_dump_stream)
744 {
745 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
746 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
747 if (loop->cont)
748 fprintf (loop_dump_stream, "Continue at insn %d.\n",
749 INSN_UID (loop->cont));
750 }
751
752 /* Scan through the loop finding insns that are safe to move.
753 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
754 this reg will be considered invariant for subsequent insns.
755 We consider whether subsequent insns use the reg
756 in deciding whether it is worth actually moving.
757
758 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
759 and therefore it is possible that the insns we are scanning
760 would never be executed. At such times, we must make sure
761 that it is safe to execute the insn once instead of zero times.
762 When MAYBE_NEVER is 0, all insns will be executed at least once
763 so that is not a problem. */
764
765 for (p = next_insn_in_loop (loop, loop->scan_start);
766 p != NULL_RTX;
767 p = next_insn_in_loop (loop, p))
768 {
769 if (GET_CODE (p) == INSN
770 && (set = single_set (p))
771 && GET_CODE (SET_DEST (set)) == REG
772 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
773 && SET_DEST (set) != pic_offset_table_rtx
774 #endif
775 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
776 {
777 int tem1 = 0;
778 int tem2 = 0;
779 int move_insn = 0;
780 rtx src = SET_SRC (set);
781 rtx dependencies = 0;
782
783 /* Figure out what to use as a source of this insn. If a REG_EQUIV
784 note is given or if a REG_EQUAL note with a constant operand is
785 specified, use it as the source and mark that we should move
786 this insn by calling emit_move_insn rather than duplicating the
787 insn.
788
789 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
790 is present. */
791 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
792 if (temp)
793 src = XEXP (temp, 0), move_insn = 1;
794 else
795 {
796 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
797 if (temp && CONSTANT_P (XEXP (temp, 0)))
798 src = XEXP (temp, 0), move_insn = 1;
799 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
800 {
801 src = XEXP (temp, 0);
802 /* A libcall block can use regs that don't appear in
803 the equivalent expression. To move the libcall,
804 we must move those regs too. */
805 dependencies = libcall_other_reg (p, src);
806 }
807 }
808
809 /* For parallels, add any possible uses to the dependencies, as we can't move
810 the insn without resolving them first. */
811 if (GET_CODE (PATTERN (p)) == PARALLEL)
812 {
813 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
814 {
815 rtx x = XVECEXP (PATTERN (p), 0, i);
816 if (GET_CODE (x) == USE)
817 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
818 }
819 }
820
821 /* Don't try to optimize a register that was made
822 by loop-optimization for an inner loop.
823 We don't know its life-span, so we can't compute the benefit. */
824 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
825 ;
826 else if (/* The register is used in basic blocks other
827 than the one where it is set (meaning that
828 something after this point in the loop might
829 depend on its value before the set). */
830 ! reg_in_basic_block_p (p, SET_DEST (set))
831 /* And the set is not guaranteed to be executed once
832 the loop starts, or the value before the set is
833 needed before the set occurs...
834
835 ??? Note we have quadratic behaviour here, mitigated
836 by the fact that the previous test will often fail for
837 large loops. Rather than re-scanning the entire loop
838 each time for register usage, we should build tables
839 of the register usage and use them here instead. */
840 && (maybe_never
841 || loop_reg_used_before_p (loop, set, p)))
842 /* It is unsafe to move the set.
843
844 This code used to consider it OK to move a set of a variable
845 which was not created by the user and not used in an exit test.
846 That behavior is incorrect and was removed. */
847 ;
848 else if ((tem = loop_invariant_p (loop, src))
849 && (dependencies == 0
850 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
851 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
852 || (tem1
853 = consec_sets_invariant_p
854 (loop, SET_DEST (set),
855 regs->array[REGNO (SET_DEST (set))].set_in_loop,
856 p)))
857 /* If the insn can cause a trap (such as divide by zero),
858 can't move it unless it's guaranteed to be executed
859 once the loop is entered. Even a function call might
860 prevent the trap insn from being reached
861 (since it might exit!) */
862 && ! ((maybe_never || call_passed)
863 && may_trap_p (src)))
864 {
865 struct movable *m;
866 int regno = REGNO (SET_DEST (set));
867
868 /* A potential lossage is the case where two insns
869 can be combined as long as they are both in the loop, but
870 we move one of them outside the loop. For large loops,
871 this can lose. The most common case of this is the address
872 of a function being called.
873
874 Therefore, if this register is marked as being used exactly
875 once and we are in a loop with calls (a "large loop"), see if
876 we can replace the usage of this register with the source
877 of this SET. If we can, delete this insn.
878
879 Don't do this if P has a REG_RETVAL note or if we have
880 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
881
882 if (loop_info->has_call
883 && regs->array[regno].single_usage != 0
884 && regs->array[regno].single_usage != const0_rtx
885 && REGNO_FIRST_UID (regno) == INSN_UID (p)
886 && (REGNO_LAST_UID (regno)
887 == INSN_UID (regs->array[regno].single_usage))
888 && regs->array[regno].set_in_loop == 1
889 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
890 && ! side_effects_p (SET_SRC (set))
891 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
892 && (! SMALL_REGISTER_CLASSES
893 || (! (GET_CODE (SET_SRC (set)) == REG
894 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
895 /* This test is not redundant; SET_SRC (set) might be
896 a call-clobbered register and the life of REGNO
897 might span a call. */
898 && ! modified_between_p (SET_SRC (set), p,
899 regs->array[regno].single_usage)
900 && no_labels_between_p (p, regs->array[regno].single_usage)
901 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
902 regs->array[regno].single_usage))
903 {
904 /* Replace any usage in a REG_EQUAL note. Must copy the
905 new source, so that we don't get rtx sharing between the
906 SET_SOURCE and REG_NOTES of insn p. */
907 REG_NOTES (regs->array[regno].single_usage)
908 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
909 SET_DEST (set), copy_rtx (SET_SRC (set)));
910
911 delete_insn (p);
912 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
913 regs->array[regno+i].set_in_loop = 0;
914 continue;
915 }
916
917 m = (struct movable *) xmalloc (sizeof (struct movable));
918 m->next = 0;
919 m->insn = p;
920 m->set_src = src;
921 m->dependencies = dependencies;
922 m->set_dest = SET_DEST (set);
923 m->force = 0;
924 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
925 m->done = 0;
926 m->forces = 0;
927 m->partial = 0;
928 m->move_insn = move_insn;
929 m->move_insn_first = 0;
930 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
931 m->savemode = VOIDmode;
932 m->regno = regno;
933 /* Set M->cond if either loop_invariant_p
934 or consec_sets_invariant_p returned 2
935 (only conditionally invariant). */
936 m->cond = ((tem | tem1 | tem2) > 1);
937 m->global = LOOP_REG_GLOBAL_P (loop, regno);
938 m->match = 0;
939 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
940 m->savings = regs->array[regno].n_times_set;
941 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
942 m->savings += libcall_benefit (p);
943 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
944 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
945 /* Add M to the end of the chain MOVABLES. */
946 loop_movables_add (movables, m);
947
948 if (m->consec > 0)
949 {
950 /* It is possible for the first instruction to have a
951 REG_EQUAL note but a non-invariant SET_SRC, so we must
952 remember the status of the first instruction in case
953 the last instruction doesn't have a REG_EQUAL note. */
954 m->move_insn_first = m->move_insn;
955
956 /* Skip this insn, not checking REG_LIBCALL notes. */
957 p = next_nonnote_insn (p);
958 /* Skip the consecutive insns, if there are any. */
959 p = skip_consec_insns (p, m->consec);
960 /* Back up to the last insn of the consecutive group. */
961 p = prev_nonnote_insn (p);
962
963 /* We must now reset m->move_insn, m->is_equiv, and possibly
964 m->set_src to correspond to the effects of all the
965 insns. */
966 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
967 if (temp)
968 m->set_src = XEXP (temp, 0), m->move_insn = 1;
969 else
970 {
971 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
972 if (temp && CONSTANT_P (XEXP (temp, 0)))
973 m->set_src = XEXP (temp, 0), m->move_insn = 1;
974 else
975 m->move_insn = 0;
976
977 }
978 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
979 }
980 }
981 /* If this register is always set within a STRICT_LOW_PART
982 or set to zero, then its high bytes are constant.
983 So clear them outside the loop and within the loop
984 just load the low bytes.
985 We must check that the machine has an instruction to do so.
986 Also, if the value loaded into the register
987 depends on the same register, this cannot be done. */
988 else if (SET_SRC (set) == const0_rtx
989 && GET_CODE (NEXT_INSN (p)) == INSN
990 && (set1 = single_set (NEXT_INSN (p)))
991 && GET_CODE (set1) == SET
992 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
993 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
994 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
995 == SET_DEST (set))
996 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
997 {
998 int regno = REGNO (SET_DEST (set));
999 if (regs->array[regno].set_in_loop == 2)
1000 {
1001 struct movable *m;
1002 m = (struct movable *) xmalloc (sizeof (struct movable));
1003 m->next = 0;
1004 m->insn = p;
1005 m->set_dest = SET_DEST (set);
1006 m->dependencies = 0;
1007 m->force = 0;
1008 m->consec = 0;
1009 m->done = 0;
1010 m->forces = 0;
1011 m->move_insn = 0;
1012 m->move_insn_first = 0;
1013 m->partial = 1;
1014 /* If the insn may not be executed on some cycles,
1015 we can't clear the whole reg; clear just high part.
1016 Not even if the reg is used only within this loop.
1017 Consider this:
1018 while (1)
1019 while (s != t) {
1020 if (foo ()) x = *s;
1021 use (x);
1022 }
1023 Clearing x before the inner loop could clobber a value
1024 being saved from the last time around the outer loop.
1025 However, if the reg is not used outside this loop
1026 and all uses of the register are in the same
1027 basic block as the store, there is no problem.
1028
1029 If this insn was made by loop, we don't know its
1030 INSN_LUID and hence must make a conservative
1031 assumption. */
1032 m->global = (INSN_UID (p) >= max_uid_for_loop
1033 || LOOP_REG_GLOBAL_P (loop, regno)
1034 || (labels_in_range_p
1035 (p, REGNO_FIRST_LUID (regno))));
1036 if (maybe_never && m->global)
1037 m->savemode = GET_MODE (SET_SRC (set1));
1038 else
1039 m->savemode = VOIDmode;
1040 m->regno = regno;
1041 m->cond = 0;
1042 m->match = 0;
1043 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1044 m->savings = 1;
1045 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1046 regs->array[regno+i].set_in_loop = -1;
1047 /* Add M to the end of the chain MOVABLES. */
1048 loop_movables_add (movables, m);
1049 }
1050 }
1051 }
1052 /* Past a call insn, we get to insns which might not be executed
1053 because the call might exit. This matters for insns that trap.
1054 Constant and pure call insns always return, so they don't count. */
1055 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1056 call_passed = 1;
1057 /* Past a label or a jump, we get to insns for which we
1058 can't count on whether or how many times they will be
1059 executed during each iteration. Therefore, we can
1060 only move out sets of trivial variables
1061 (those not used after the loop). */
1062 /* Similar code appears twice in strength_reduce. */
1063 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1064 /* If we enter the loop in the middle, and scan around to the
1065 beginning, don't set maybe_never for that. This must be an
1066 unconditional jump, otherwise the code at the top of the
1067 loop might never be executed. Unconditional jumps are
1068 followed by a barrier then the loop_end. */
1069 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1070 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1071 && any_uncondjump_p (p)))
1072 maybe_never = 1;
1073 else if (GET_CODE (p) == NOTE)
1074 {
1075 /* At the virtual top of a converted loop, insns are again known to
1076 be executed: logically, the loop begins here even though the exit
1077 code has been duplicated. */
1078 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1079 maybe_never = call_passed = 0;
1080 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1081 loop_depth++;
1082 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1083 loop_depth--;
1084 }
1085 }
1086
1087 /* If one movable subsumes another, ignore that other. */
1088
1089 ignore_some_movables (movables);
1090
1091 /* For each movable insn, see if the reg that it loads
1092 leads when it dies right into another conditionally movable insn.
1093 If so, record that the second insn "forces" the first one,
1094 since the second can be moved only if the first is. */
1095
1096 force_movables (movables);
1097
1098 /* See if there are multiple movable insns that load the same value.
1099 If there are, make all but the first point at the first one
1100 through the `match' field, and add the priorities of them
1101 all together as the priority of the first. */
1102
1103 combine_movables (movables, regs);
1104
1105 /* Now consider each movable insn to decide whether it is worth moving.
1106 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1107
1108 Generally this increases code size, so do not move movables when
1109 optimizing for code size. */
1110
1111 if (! optimize_size)
1112 {
1113 move_movables (loop, movables, threshold, insn_count);
1114
1115 /* Recalculate regs->array if move_movables has created new
1116 registers. */
1117 if (max_reg_num () > regs->num)
1118 {
1119 loop_regs_scan (loop, 0);
1120 for (update_start = loop_start;
1121 PREV_INSN (update_start)
1122 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1123 update_start = PREV_INSN (update_start))
1124 ;
1125 update_end = NEXT_INSN (loop_end);
1126
1127 reg_scan_update (update_start, update_end, loop_max_reg);
1128 loop_max_reg = max_reg_num ();
1129 }
1130 }
1131
1132 /* Now candidates that still are negative are those not moved.
1133 Change regs->array[I].set_in_loop to indicate that those are not actually
1134 invariant. */
1135 for (i = 0; i < regs->num; i++)
1136 if (regs->array[i].set_in_loop < 0)
1137 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1138
1139 /* Now that we've moved some things out of the loop, we might be able to
1140 hoist even more memory references. */
1141 load_mems (loop);
1142
1143 /* Recalculate regs->array if load_mems has created new registers. */
1144 if (max_reg_num () > regs->num)
1145 loop_regs_scan (loop, 0);
1146
1147 for (update_start = loop_start;
1148 PREV_INSN (update_start)
1149 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1150 update_start = PREV_INSN (update_start))
1151 ;
1152 update_end = NEXT_INSN (loop_end);
1153
1154 reg_scan_update (update_start, update_end, loop_max_reg);
1155 loop_max_reg = max_reg_num ();
1156
1157 if (flag_strength_reduce)
1158 {
1159 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1160 /* Ensure our label doesn't go away. */
1161 LABEL_NUSES (update_end)++;
1162
1163 strength_reduce (loop, flags);
1164
1165 reg_scan_update (update_start, update_end, loop_max_reg);
1166 loop_max_reg = max_reg_num ();
1167
1168 if (update_end && GET_CODE (update_end) == CODE_LABEL
1169 && --LABEL_NUSES (update_end) == 0)
1170 delete_related_insns (update_end);
1171 }
1172
1173
1174 /* The movable information was required for strength reduction; it can be freed now. */
1175 loop_movables_free (movables);
1176
1177 free (regs->array);
1178 regs->array = 0;
1179 regs->num = 0;
1180 }
1181 \f
1182 /* Add elements to *OUTPUT to record all the pseudo-regs
1183 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1184
1185 void
1186 record_excess_regs (in_this, not_in_this, output)
1187 rtx in_this, not_in_this;
1188 rtx *output;
1189 {
1190 enum rtx_code code;
1191 const char *fmt;
1192 int i;
1193
1194 code = GET_CODE (in_this);
1195
1196 switch (code)
1197 {
1198 case PC:
1199 case CC0:
1200 case CONST_INT:
1201 case CONST_DOUBLE:
1202 case CONST:
1203 case SYMBOL_REF:
1204 case LABEL_REF:
1205 return;
1206
1207 case REG:
1208 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1209 && ! reg_mentioned_p (in_this, not_in_this))
1210 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1211 return;
1212
1213 default:
1214 break;
1215 }
1216
1217 fmt = GET_RTX_FORMAT (code);
1218 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1219 {
1220 int j;
1221
1222 switch (fmt[i])
1223 {
1224 case 'E':
1225 for (j = 0; j < XVECLEN (in_this, i); j++)
1226 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1227 break;
1228
1229 case 'e':
1230 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1231 break;
1232 }
1233 }
1234 }
1235 \f
1236 /* Check what regs are referred to in the libcall block ending with INSN,
1237 aside from those mentioned in the equivalent value.
1238 If there are none, return 0.
1239 If there are one or more, return an EXPR_LIST containing all of them. */
1240
1241 rtx
1242 libcall_other_reg (insn, equiv)
1243 rtx insn, equiv;
1244 {
1245 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1246 rtx p = XEXP (note, 0);
1247 rtx output = 0;
1248
1249 /* First, find all the regs used in the libcall block
1250 that are not mentioned as inputs to the result. */
1251
1252 while (p != insn)
1253 {
1254 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1255 || GET_CODE (p) == CALL_INSN)
1256 record_excess_regs (PATTERN (p), equiv, &output);
1257 p = NEXT_INSN (p);
1258 }
1259
1260 return output;
1261 }
1262 \f
1263 /* Return 1 if all uses of REG
1264 are between INSN and the end of the basic block. */
1265
1266 static int
1267 reg_in_basic_block_p (insn, reg)
1268 rtx insn, reg;
1269 {
1270 int regno = REGNO (reg);
1271 rtx p;
1272
1273 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1274 return 0;
1275
1276 /* Search this basic block for the already recorded last use of the reg. */
1277 for (p = insn; p; p = NEXT_INSN (p))
1278 {
1279 switch (GET_CODE (p))
1280 {
1281 case NOTE:
1282 break;
1283
1284 case INSN:
1285 case CALL_INSN:
1286 /* Ordinary insn: if this is the last use, we win. */
1287 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1288 return 1;
1289 break;
1290
1291 case JUMP_INSN:
1292 /* Jump insn: if this is the last use, we win. */
1293 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1294 return 1;
1295 /* Otherwise, it's the end of the basic block, so we lose. */
1296 return 0;
1297
1298 case CODE_LABEL:
1299 case BARRIER:
1300 /* It's the end of the basic block, so we lose. */
1301 return 0;
1302
1303 default:
1304 break;
1305 }
1306 }
1307
1308 /* The "last use" that was recorded can't be found after the first
1309 use. This can happen when the last use was deleted while
1310 processing an inner loop, this inner loop was then completely
1311 unrolled, and the outer loop is always exited after the inner loop,
1312 so that everything after the first use becomes a single basic block. */
1313 return 1;
1314 }
1315 \f
1316 /* Compute the benefit of eliminating the insns in the block whose
1317 last insn is LAST. This may be a group of insns used to compute a
1318 value directly or can contain a library call. */
1319
1320 static int
1321 libcall_benefit (last)
1322 rtx last;
1323 {
1324 rtx insn;
1325 int benefit = 0;
1326
1327 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1328 insn != last; insn = NEXT_INSN (insn))
1329 {
1330 if (GET_CODE (insn) == CALL_INSN)
1331 benefit += 10; /* Assume at least this many insns in a library
1332 routine. */
1333 else if (GET_CODE (insn) == INSN
1334 && GET_CODE (PATTERN (insn)) != USE
1335 && GET_CODE (PATTERN (insn)) != CLOBBER)
1336 benefit++;
1337 }
1338
1339 return benefit;
1340 }
1341 \f
1342 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1343
1344 static rtx
1345 skip_consec_insns (insn, count)
1346 rtx insn;
1347 int count;
1348 {
1349 for (; count > 0; count--)
1350 {
1351 rtx temp;
1352
1353 /* If first insn of libcall sequence, skip to end. */
1354 /* Do this at start of loop, since INSN is guaranteed to
1355 be an insn here. */
1356 if (GET_CODE (insn) != NOTE
1357 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1358 insn = XEXP (temp, 0);
1359
1360 do
1361 insn = NEXT_INSN (insn);
1362 while (GET_CODE (insn) == NOTE);
1363 }
1364
1365 return insn;
1366 }
1367
1368 /* Ignore any movable whose insn falls within a libcall
1369 which is part of another movable.
1370 We make use of the fact that the movable for the libcall value
1371 was made later and so appears later on the chain. */
1372
1373 static void
1374 ignore_some_movables (movables)
1375 struct loop_movables *movables;
1376 {
1377 struct movable *m, *m1;
1378
1379 for (m = movables->head; m; m = m->next)
1380 {
1381 /* Is this a movable for the value of a libcall? */
1382 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1383 if (note)
1384 {
1385 rtx insn;
1386 /* Check for earlier movables inside that range,
1387 and mark them invalid. We cannot use LUIDs here because
1388 insns created by loop.c for prior loops don't have LUIDs.
1389 Rather than reject all such insns from movables, we just
1390 explicitly check each insn in the libcall (since invariant
1391 libcalls aren't that common). */
1392 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1393 for (m1 = movables->head; m1 != m; m1 = m1->next)
1394 if (m1->insn == insn)
1395 m1->done = 1;
1396 }
1397 }
1398 }
1399
1400 /* For each movable insn, see if the reg that it loads
1401 leads when it dies right into another conditionally movable insn.
1402 If so, record that the second insn "forces" the first one,
1403 since the second can be moved only if the first is. */
1404
1405 static void
1406 force_movables (movables)
1407 struct loop_movables *movables;
1408 {
1409 struct movable *m, *m1;
1410
1411 for (m1 = movables->head; m1; m1 = m1->next)
1412 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1413 if (!m1->partial && !m1->done)
1414 {
1415 int regno = m1->regno;
1416 for (m = m1->next; m; m = m->next)
1417 /* ??? Could this be a bug? What if CSE caused the
1418 register of M1 to be used after this insn?
1419 Since CSE does not update regno_last_uid,
1420 this insn M->insn might not be where it dies.
1421 But very likely this doesn't matter; what matters is
1422 that M's reg is computed from M1's reg. */
1423 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1424 && !m->done)
1425 break;
1426 if (m != 0 && m->set_src == m1->set_dest
1427 /* If m->consec, m->set_src isn't valid. */
1428 && m->consec == 0)
1429 m = 0;
1430
1431 /* Increase the priority of moving the first insn
1432 since it permits the second to be moved as well. */
1433 if (m != 0)
1434 {
1435 m->forces = m1;
1436 m1->lifetime += m->lifetime;
1437 m1->savings += m->savings;
1438 }
1439 }
1440 }
1441 \f
1442 /* Find invariant expressions that are equal and can be combined into
1443 one register. */
1444
1445 static void
1446 combine_movables (movables, regs)
1447 struct loop_movables *movables;
1448 struct loop_regs *regs;
1449 {
1450 struct movable *m;
1451 char *matched_regs = (char *) xmalloc (regs->num);
1452 enum machine_mode mode;
1453
1454 /* Regs that are set more than once are not allowed to match
1455 or be matched. I'm no longer sure why not. */
1456 /* Only pseudo registers are allowed to match or be matched,
1457 since move_movables does not validate the change. */
1458 /* Perhaps testing m->consec_sets would be more appropriate here? */
1459
1460 for (m = movables->head; m; m = m->next)
1461 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1462 && m->regno >= FIRST_PSEUDO_REGISTER
1463 && !m->partial)
1464 {
1465 struct movable *m1;
1466 int regno = m->regno;
1467
1468 memset (matched_regs, 0, regs->num);
1469 matched_regs[regno] = 1;
1470
1471 /* We want later insns to match the first one. Don't make the first
1472 one match any later ones. So start this loop at m->next. */
1473 for (m1 = m->next; m1; m1 = m1->next)
1474 if (m != m1 && m1->match == 0
1475 && regs->array[m1->regno].n_times_set == 1
1476 && m1->regno >= FIRST_PSEUDO_REGISTER
1477 /* A reg used outside the loop mustn't be eliminated. */
1478 && !m1->global
1479 /* A reg used for zero-extending mustn't be eliminated. */
1480 && !m1->partial
1481 && (matched_regs[m1->regno]
1482 ||
1483 (
1484 /* Can combine regs with different modes loaded from the
1485 same constant only if the modes are the same or
1486 if both are integer modes with M wider or the same
1487 width as M1. The check for integer is redundant, but
1488 safe, since the only case of differing destination
1489 modes with equal sources is when both sources are
1490 VOIDmode, i.e., CONST_INT. */
1491 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1492 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1493 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1494 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1495 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1496 /* See if the source of M1 says it matches M. */
1497 && ((GET_CODE (m1->set_src) == REG
1498 && matched_regs[REGNO (m1->set_src)])
1499 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1500 movables, regs))))
1501 && ((m->dependencies == m1->dependencies)
1502 || rtx_equal_p (m->dependencies, m1->dependencies)))
1503 {
1504 m->lifetime += m1->lifetime;
1505 m->savings += m1->savings;
1506 m1->done = 1;
1507 m1->match = m;
1508 matched_regs[m1->regno] = 1;
1509 }
1510 }
1511
1512 /* Now combine the regs used for zero-extension.
1513 This can be done for those not marked `global'
1514 provided their lives don't overlap. */
1515
1516 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1517 mode = GET_MODE_WIDER_MODE (mode))
1518 {
1519 struct movable *m0 = 0;
1520
1521 /* Combine all the registers for extension from mode MODE.
1522 Don't combine any that are used outside this loop. */
1523 for (m = movables->head; m; m = m->next)
1524 if (m->partial && ! m->global
1525 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1526 {
1527 struct movable *m1;
1528
1529 int first = REGNO_FIRST_LUID (m->regno);
1530 int last = REGNO_LAST_LUID (m->regno);
1531
1532 if (m0 == 0)
1533 {
1534 /* First one: don't check for overlap, just record it. */
1535 m0 = m;
1536 continue;
1537 }
1538
1539 /* Make sure they extend to the same mode.
1540 (Almost always true.) */
1541 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1542 continue;
1543
1544 /* We already have one: check for overlap with those
1545 already combined together. */
1546 for (m1 = movables->head; m1 != m; m1 = m1->next)
1547 if (m1 == m0 || (m1->partial && m1->match == m0))
1548 if (! (REGNO_FIRST_LUID (m1->regno) > last
1549 || REGNO_LAST_LUID (m1->regno) < first))
1550 goto overlap;
1551
1552 /* No overlap: we can combine this with the others. */
1553 m0->lifetime += m->lifetime;
1554 m0->savings += m->savings;
1555 m->done = 1;
1556 m->match = m0;
1557
1558 overlap:
1559 ;
1560 }
1561 }
1562
1563 /* Clean up. */
1564 free (matched_regs);
1565 }
1566
1567 /* Returns the number of movable instructions in LOOP that were not
1568 moved outside the loop. */
1569
1570 static int
1571 num_unmoved_movables (loop)
1572 const struct loop *loop;
1573 {
1574 int num = 0;
1575 struct movable *m;
1576
1577 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1578 if (!m->done)
1579 ++num;
1580
1581 return num;
1582 }
1583
1584 \f
1585 /* Return 1 if regs X and Y will become the same if moved. */
1586
1587 static int
1588 regs_match_p (x, y, movables)
1589 rtx x, y;
1590 struct loop_movables *movables;
1591 {
1592 unsigned int xn = REGNO (x);
1593 unsigned int yn = REGNO (y);
1594 struct movable *mx, *my;
1595
1596 for (mx = movables->head; mx; mx = mx->next)
1597 if (mx->regno == xn)
1598 break;
1599
1600 for (my = movables->head; my; my = my->next)
1601 if (my->regno == yn)
1602 break;
1603
1604 return (mx && my
1605 && ((mx->match == my->match && mx->match != 0)
1606 || mx->match == my
1607 || mx == my->match));
1608 }
1609
1610 /* Return 1 if X and Y are identical-looking rtx's.
1611 This is the Lisp function EQUAL for rtx arguments.
1612
1613 If two registers are matching movables or a movable register and an
1614 equivalent constant, consider them equal. */
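/* For instance, a pseudo marked as a motion candidate (set_in_loop == -2)
   whose recorded source is (const_int 4) compares equal to (const_int 4)
   itself, which is what lets two such loads be matched as redundant.  */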
1615
1616 static int
1617 rtx_equal_for_loop_p (x, y, movables, regs)
1618 rtx x, y;
1619 struct loop_movables *movables;
1620 struct loop_regs *regs;
1621 {
1622 int i;
1623 int j;
1624 struct movable *m;
1625 enum rtx_code code;
1626 const char *fmt;
1627
1628 if (x == y)
1629 return 1;
1630 if (x == 0 || y == 0)
1631 return 0;
1632
1633 code = GET_CODE (x);
1634
1635 /* If we have a register and a constant, they may sometimes be
1636 equal. */
1637 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1638 && CONSTANT_P (y))
1639 {
1640 for (m = movables->head; m; m = m->next)
1641 if (m->move_insn && m->regno == REGNO (x)
1642 && rtx_equal_p (m->set_src, y))
1643 return 1;
1644 }
1645 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1646 && CONSTANT_P (x))
1647 {
1648 for (m = movables->head; m; m = m->next)
1649 if (m->move_insn && m->regno == REGNO (y)
1650 && rtx_equal_p (m->set_src, x))
1651 return 1;
1652 }
1653
1654 /* Otherwise, rtx's of different codes cannot be equal. */
1655 if (code != GET_CODE (y))
1656 return 0;
1657
1658 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1659 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1660
1661 if (GET_MODE (x) != GET_MODE (y))
1662 return 0;
1663
1664 /* These three types of rtx's can be compared nonrecursively. */
1665 if (code == REG)
1666 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1667
1668 if (code == LABEL_REF)
1669 return XEXP (x, 0) == XEXP (y, 0);
1670 if (code == SYMBOL_REF)
1671 return XSTR (x, 0) == XSTR (y, 0);
1672
1673 /* Compare the elements. If any pair of corresponding elements
1674 fails to match, return 0 for the whole thing. */
1675
1676 fmt = GET_RTX_FORMAT (code);
1677 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1678 {
1679 switch (fmt[i])
1680 {
1681 case 'w':
1682 if (XWINT (x, i) != XWINT (y, i))
1683 return 0;
1684 break;
1685
1686 case 'i':
1687 if (XINT (x, i) != XINT (y, i))
1688 return 0;
1689 break;
1690
1691 case 'E':
1692 /* Two vectors must have the same length. */
1693 if (XVECLEN (x, i) != XVECLEN (y, i))
1694 return 0;
1695
1696 /* And the corresponding elements must match. */
1697 for (j = 0; j < XVECLEN (x, i); j++)
1698 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1699 movables, regs) == 0)
1700 return 0;
1701 break;
1702
1703 case 'e':
1704 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1705 == 0)
1706 return 0;
1707 break;
1708
1709 case 's':
1710 if (strcmp (XSTR (x, i), XSTR (y, i)))
1711 return 0;
1712 break;
1713
1714 case 'u':
1715 /* These are just backpointers, so they don't matter. */
1716 break;
1717
1718 case '0':
1719 break;
1720
1721 /* It is believed that rtx's at this level will never
1722 contain anything but integers and other rtx's,
1723 except for within LABEL_REFs and SYMBOL_REFs. */
1724 default:
1725 abort ();
1726 }
1727 }
1728 return 1;
1729 }
1730 \f
1731 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1732 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1733 references is incremented once for each added note. */
1734
1735 static void
1736 add_label_notes (x, insns)
1737 rtx x;
1738 rtx insns;
1739 {
1740 enum rtx_code code = GET_CODE (x);
1741 int i, j;
1742 const char *fmt;
1743 rtx insn;
1744
1745 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1746 {
1747 /* This code used to ignore labels that referred to dispatch tables to
1748 avoid flow generating (slightly) worse code.
1749
1750 We no longer ignore such label references (see LABEL_REF handling in
1751 mark_jump_label for additional information). */
1752 for (insn = insns; insn; insn = NEXT_INSN (insn))
1753 if (reg_mentioned_p (XEXP (x, 0), insn))
1754 {
1755 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1756 REG_NOTES (insn));
1757 if (LABEL_P (XEXP (x, 0)))
1758 LABEL_NUSES (XEXP (x, 0))++;
1759 }
1760 }
1761
1762 fmt = GET_RTX_FORMAT (code);
1763 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1764 {
1765 if (fmt[i] == 'e')
1766 add_label_notes (XEXP (x, i), insns);
1767 else if (fmt[i] == 'E')
1768 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1769 add_label_notes (XVECEXP (x, i, j), insns);
1770 }
1771 }
1772 \f
1773 /* Scan MOVABLES, and move the insns that deserve to be moved.
1774 If two matching movables are combined, replace one reg with the
1775 other throughout. */
1776
1777 static void
1778 move_movables (loop, movables, threshold, insn_count)
1779 struct loop *loop;
1780 struct loop_movables *movables;
1781 int threshold;
1782 int insn_count;
1783 {
1784 struct loop_regs *regs = LOOP_REGS (loop);
1785 int nregs = regs->num;
1786 rtx new_start = 0;
1787 struct movable *m;
1788 rtx p;
1789 rtx loop_start = loop->start;
1790 rtx loop_end = loop->end;
1791 /* Map of pseudo-register replacements to handle combining
1792 when we move several insns that load the same value
1793 into different pseudo-registers. */
1794 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1795 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1796
1797 for (m = movables->head; m; m = m->next)
1798 {
1799 /* Describe this movable insn. */
1800
1801 if (loop_dump_stream)
1802 {
1803 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1804 INSN_UID (m->insn), m->regno, m->lifetime);
1805 if (m->consec > 0)
1806 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1807 if (m->cond)
1808 fprintf (loop_dump_stream, "cond ");
1809 if (m->force)
1810 fprintf (loop_dump_stream, "force ");
1811 if (m->global)
1812 fprintf (loop_dump_stream, "global ");
1813 if (m->done)
1814 fprintf (loop_dump_stream, "done ");
1815 if (m->move_insn)
1816 fprintf (loop_dump_stream, "move-insn ");
1817 if (m->match)
1818 fprintf (loop_dump_stream, "matches %d ",
1819 INSN_UID (m->match->insn));
1820 if (m->forces)
1821 fprintf (loop_dump_stream, "forces %d ",
1822 INSN_UID (m->forces->insn));
1823 }
1824
1825 /* Ignore the insn if it's already done (it matched something else).
1826 Otherwise, see if it is now safe to move. */
1827
1828 if (!m->done
1829 && (! m->cond
1830 || (1 == loop_invariant_p (loop, m->set_src)
1831 && (m->dependencies == 0
1832 || 1 == loop_invariant_p (loop, m->dependencies))
1833 && (m->consec == 0
1834 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1835 m->consec + 1,
1836 m->insn))))
1837 && (! m->forces || m->forces->done))
1838 {
1839 int regno;
1840 rtx p;
1841 int savings = m->savings;
1842
1843 /* We have an insn that is safe to move.
1844 Compute its desirability. */
1845
1846 p = m->insn;
1847 regno = m->regno;
1848
1849 if (loop_dump_stream)
1850 fprintf (loop_dump_stream, "savings %d ", savings);
1851
1852 if (regs->array[regno].moved_once && loop_dump_stream)
1853 fprintf (loop_dump_stream, "halved since already moved ");
1854
1855 /* An insn MUST be moved if we already moved something else
1856 which is safe only if this one is moved too: that is,
1857 if already_moved[REGNO] is nonzero. */
1858
1859 /* An insn is desirable to move if the new lifetime of the
1860 register is no more than THRESHOLD times the old lifetime.
1861 If it's not desirable, it means the loop is so big
1862 that moving won't speed things up much,
1863 and it is liable to make register usage worse. */
1864
1865 /* It is also desirable to move if it can be moved at no
1866 extra cost because something else was already moved. */
1867
1868 if (already_moved[regno]
1869 || flag_move_all_movables
1870 || (threshold * savings * m->lifetime) >=
1871 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1872 || (m->forces && m->forces->done
1873 && regs->array[m->forces->regno].n_times_set == 1))
1874 {
1875 int count;
1876 struct movable *m1;
1877 rtx first = NULL_RTX;
1878
1879 /* Now move the insns that set the reg. */
1880
1881 if (m->partial && m->match)
1882 {
1883 rtx newpat, i1;
1884 rtx r1, r2;
1885 /* Find the end of this chain of matching regs.
1886 Thus, we load each reg in the chain from that one reg.
1887 And that reg is loaded with 0 directly,
1888 since it has ->match == 0. */
1889 for (m1 = m; m1->match; m1 = m1->match);
1890 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1891 SET_DEST (PATTERN (m1->insn)));
1892 i1 = loop_insn_hoist (loop, newpat);
1893
1894 /* Mark the moved, invariant reg as being allowed to
1895 share a hard reg with the other matching invariant. */
1896 REG_NOTES (i1) = REG_NOTES (m->insn);
1897 r1 = SET_DEST (PATTERN (m->insn));
1898 r2 = SET_DEST (PATTERN (m1->insn));
1899 regs_may_share
1900 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1901 gen_rtx_EXPR_LIST (VOIDmode, r2,
1902 regs_may_share));
1903 delete_insn (m->insn);
1904
1905 if (new_start == 0)
1906 new_start = i1;
1907
1908 if (loop_dump_stream)
1909 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1910 }
1911 /* If we are to re-generate the item being moved with a
1912 new move insn, first delete what we have and then emit
1913 the move insn before the loop. */
1914 else if (m->move_insn)
1915 {
1916 rtx i1, temp, seq;
1917
1918 for (count = m->consec; count >= 0; count--)
1919 {
1920 /* If this is the first insn of a library call sequence,
1921 skip to the end. */
1922 if (GET_CODE (p) != NOTE
1923 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1924 p = XEXP (temp, 0);
1925
1926 /* If this is the last insn of a libcall sequence, then
1927 delete every insn in the sequence except the last.
1928 The last insn is handled in the normal manner. */
1929 if (GET_CODE (p) != NOTE
1930 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1931 {
1932 temp = XEXP (temp, 0);
1933 while (temp != p)
1934 temp = delete_insn (temp);
1935 }
1936
1937 temp = p;
1938 p = delete_insn (p);
1939
1940 /* simplify_giv_expr expects that it can walk the insns
1941 at m->insn forwards and see this old sequence we are
1942 tossing here. delete_insn does preserve the next
1943 pointers, but when we skip over a NOTE we must fix
1944 it up. Otherwise that code walks into the non-deleted
1945 insn stream. */
1946 while (p && GET_CODE (p) == NOTE)
1947 p = NEXT_INSN (temp) = NEXT_INSN (p);
1948 }
1949
1950 start_sequence ();
1951 emit_move_insn (m->set_dest, m->set_src);
1952 seq = get_insns ();
1953 end_sequence ();
1954
1955 add_label_notes (m->set_src, seq);
1956
1957 i1 = loop_insn_hoist (loop, seq);
1958 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1959 set_unique_reg_note (i1,
1960 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1961 m->set_src);
1962
1963 if (loop_dump_stream)
1964 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1965
1966 /* The more regs we move, the less we like moving them. */
1967 threshold -= 3;
1968 }
1969 else
1970 {
1971 for (count = m->consec; count >= 0; count--)
1972 {
1973 rtx i1, temp;
1974
1975 /* If first insn of libcall sequence, skip to end. */
1976 /* Do this at start of loop, since p is guaranteed to
1977 be an insn here. */
1978 if (GET_CODE (p) != NOTE
1979 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1980 p = XEXP (temp, 0);
1981
1982 /* If last insn of libcall sequence, move all
1983 insns except the last before the loop. The last
1984 insn is handled in the normal manner. */
1985 if (GET_CODE (p) != NOTE
1986 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1987 {
1988 rtx fn_address = 0;
1989 rtx fn_reg = 0;
1990 rtx fn_address_insn = 0;
1991
1992 first = 0;
1993 for (temp = XEXP (temp, 0); temp != p;
1994 temp = NEXT_INSN (temp))
1995 {
1996 rtx body;
1997 rtx n;
1998 rtx next;
1999
2000 if (GET_CODE (temp) == NOTE)
2001 continue;
2002
2003 body = PATTERN (temp);
2004
2005 /* Find the next insn after TEMP,
2006 not counting USE or NOTE insns. */
2007 for (next = NEXT_INSN (temp); next != p;
2008 next = NEXT_INSN (next))
2009 if (! (GET_CODE (next) == INSN
2010 && GET_CODE (PATTERN (next)) == USE)
2011 && GET_CODE (next) != NOTE)
2012 break;
2013
2014 /* If that is the call, this may be the insn
2015 that loads the function address.
2016
2017 Extract the function address from the insn
2018 that loads it into a register.
2019 If this insn was cse'd, we get incorrect code.
2020
2021 So emit a new move insn that copies the
2022 function address into the register that the
2023 call insn will use. flow.c will delete any
2024 redundant stores that we have created. */
2025 if (GET_CODE (next) == CALL_INSN
2026 && GET_CODE (body) == SET
2027 && GET_CODE (SET_DEST (body)) == REG
2028 && (n = find_reg_note (temp, REG_EQUAL,
2029 NULL_RTX)))
2030 {
2031 fn_reg = SET_SRC (body);
2032 if (GET_CODE (fn_reg) != REG)
2033 fn_reg = SET_DEST (body);
2034 fn_address = XEXP (n, 0);
2035 fn_address_insn = temp;
2036 }
2037 /* We have the call insn.
2038 If it uses the register we suspect it might,
2039 load it with the correct address directly. */
2040 if (GET_CODE (temp) == CALL_INSN
2041 && fn_address != 0
2042 && reg_referenced_p (fn_reg, body))
2043 loop_insn_emit_after (loop, 0, fn_address_insn,
2044 gen_move_insn
2045 (fn_reg, fn_address));
2046
2047 if (GET_CODE (temp) == CALL_INSN)
2048 {
2049 i1 = loop_call_insn_hoist (loop, body);
2050 /* Because the USAGE information potentially
2051 contains objects other than hard registers
2052 we need to copy it. */
2053 if (CALL_INSN_FUNCTION_USAGE (temp))
2054 CALL_INSN_FUNCTION_USAGE (i1)
2055 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2056 }
2057 else
2058 i1 = loop_insn_hoist (loop, body);
2059 if (first == 0)
2060 first = i1;
2061 if (temp == fn_address_insn)
2062 fn_address_insn = i1;
2063 REG_NOTES (i1) = REG_NOTES (temp);
2064 REG_NOTES (temp) = NULL;
2065 delete_insn (temp);
2066 }
2067 if (new_start == 0)
2068 new_start = first;
2069 }
2070 if (m->savemode != VOIDmode)
2071 {
2072 /* P sets REG to zero; but we should clear only
2073 the bits that are not covered by the mode
2074 m->savemode. */
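/* For example, if m->savemode is QImode the mask built below is 0xff,
   so only the low byte of REG is preserved and the rest is cleared.  */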
2075 rtx reg = m->set_dest;
2076 rtx sequence;
2077 rtx tem;
2078
2079 start_sequence ();
2080 tem = expand_simple_binop
2081 (GET_MODE (reg), AND, reg,
2082 GEN_INT ((((HOST_WIDE_INT) 1
2083 << GET_MODE_BITSIZE (m->savemode)))
2084 - 1),
2085 reg, 1, OPTAB_LIB_WIDEN);
2086 if (tem == 0)
2087 abort ();
2088 if (tem != reg)
2089 emit_move_insn (reg, tem);
2090 sequence = get_insns ();
2091 end_sequence ();
2092 i1 = loop_insn_hoist (loop, sequence);
2093 }
2094 else if (GET_CODE (p) == CALL_INSN)
2095 {
2096 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2097 /* Because the USAGE information potentially
2098 contains objects other than hard registers
2099 we need to copy it. */
2100 if (CALL_INSN_FUNCTION_USAGE (p))
2101 CALL_INSN_FUNCTION_USAGE (i1)
2102 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2103 }
2104 else if (count == m->consec && m->move_insn_first)
2105 {
2106 rtx seq;
2107 /* The SET_SRC might not be invariant, so we must
2108 use the REG_EQUAL note. */
2109 start_sequence ();
2110 emit_move_insn (m->set_dest, m->set_src);
2111 seq = get_insns ();
2112 end_sequence ();
2113
2114 add_label_notes (m->set_src, seq);
2115
2116 i1 = loop_insn_hoist (loop, seq);
2117 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2118 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2119 : REG_EQUAL, m->set_src);
2120 }
2121 else
2122 i1 = loop_insn_hoist (loop, PATTERN (p));
2123
2124 if (REG_NOTES (i1) == 0)
2125 {
2126 REG_NOTES (i1) = REG_NOTES (p);
2127 REG_NOTES (p) = NULL;
2128
2129 /* If there is a REG_EQUAL note present whose value
2130 is not loop invariant, then delete it, since it
2131 may cause problems with later optimization passes.
2132 It is possible for cse to create such notes
2133 like this as a result of record_jump_cond. */
2134
2135 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2136 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2137 remove_note (i1, temp);
2138 }
2139
2140 if (new_start == 0)
2141 new_start = i1;
2142
2143 if (loop_dump_stream)
2144 fprintf (loop_dump_stream, " moved to %d",
2145 INSN_UID (i1));
2146
2147 /* If library call, now fix the REG_NOTES that contain
2148 insn pointers, namely REG_LIBCALL on FIRST
2149 and REG_RETVAL on I1. */
2150 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2151 {
2152 XEXP (temp, 0) = first;
2153 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2154 XEXP (temp, 0) = i1;
2155 }
2156
2157 temp = p;
2158 delete_insn (p);
2159 p = NEXT_INSN (p);
2160
2161 /* simplify_giv_expr expects that it can walk the insns
2162 at m->insn forwards and see this old sequence we are
2163 tossing here. delete_insn does preserve the next
2164 pointers, but when we skip over a NOTE we must fix
2165 it up. Otherwise that code walks into the non-deleted
2166 insn stream. */
2167 while (p && GET_CODE (p) == NOTE)
2168 p = NEXT_INSN (temp) = NEXT_INSN (p);
2169 }
2170
2171 /* The more regs we move, the less we like moving them. */
2172 threshold -= 3;
2173 }
2174
2175 /* Any other movable that loads the same register
2176 MUST be moved. */
2177 already_moved[regno] = 1;
2178
2179 /* This reg has been moved out of one loop. */
2180 regs->array[regno].moved_once = 1;
2181
2182 /* The reg set here is now invariant. */
2183 if (! m->partial)
2184 {
2185 int i;
2186 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2187 regs->array[regno+i].set_in_loop = 0;
2188 }
2189
2190 m->done = 1;
2191
2192 /* Change the length-of-life info for the register
2193 to say it lives at least the full length of this loop.
2194 This will help guide optimizations in outer loops. */
2195
2196 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2197 /* This is the old insn before all the moved insns.
2198 We can't use the moved insn because it is out of range
2199 in uid_luid. Only the old insns have luids. */
2200 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2201 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2202 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2203
2204 /* Combine with this moved insn any other matching movables. */
2205
2206 if (! m->partial)
2207 for (m1 = movables->head; m1; m1 = m1->next)
2208 if (m1->match == m)
2209 {
2210 rtx temp;
2211
2212 /* Schedule the reg loaded by M1
2213 for replacement so that it shares the reg of M.
2214 If the modes differ (only possible in restricted
2215 circumstances), make a SUBREG.
2216
2217 Note this assumes that the target dependent files
2218 treat REG and SUBREG equally, including within
2219 GO_IF_LEGITIMATE_ADDRESS and in all the
2220 predicates since we never verify that replacing the
2221 original register with a SUBREG results in a
2222 recognizable insn. */
2223 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2224 reg_map[m1->regno] = m->set_dest;
2225 else
2226 reg_map[m1->regno]
2227 = gen_lowpart_common (GET_MODE (m1->set_dest),
2228 m->set_dest);
2229
2230 /* Get rid of the matching insn
2231 and prevent further processing of it. */
2232 m1->done = 1;
2233
2234 /* If library call, delete all insns. */
2235 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2236 NULL_RTX)))
2237 delete_insn_chain (XEXP (temp, 0), m1->insn);
2238 else
2239 delete_insn (m1->insn);
2240
2241 /* Any other movable that loads the same register
2242 MUST be moved. */
2243 already_moved[m1->regno] = 1;
2244
2245 /* The reg merged here is now invariant,
2246 if the reg it matches is invariant. */
2247 if (! m->partial)
2248 {
2249 int i;
2250 for (i = 0;
2251 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2252 i++)
2253 regs->array[m1->regno+i].set_in_loop = 0;
2254 }
2255 }
2256 }
2257 else if (loop_dump_stream)
2258 fprintf (loop_dump_stream, "not desirable");
2259 }
2260 else if (loop_dump_stream && !m->match)
2261 fprintf (loop_dump_stream, "not safe");
2262
2263 if (loop_dump_stream)
2264 fprintf (loop_dump_stream, "\n");
2265 }
2266
2267 if (new_start == 0)
2268 new_start = loop_start;
2269
2270 /* Go through all the instructions in the loop, making
2271 all the register substitutions scheduled in REG_MAP. */
2272 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2273 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2274 || GET_CODE (p) == CALL_INSN)
2275 {
2276 replace_regs (PATTERN (p), reg_map, nregs, 0);
2277 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2278 INSN_CODE (p) = -1;
2279 }
2280
2281 /* Clean up. */
2282 free (reg_map);
2283 free (already_moved);
2284 }
2285
2286
2287 static void
2288 loop_movables_add (movables, m)
2289 struct loop_movables *movables;
2290 struct movable *m;
2291 {
2292 if (movables->head == 0)
2293 movables->head = m;
2294 else
2295 movables->last->next = m;
2296 movables->last = m;
2297 }
2298
2299
2300 static void
2301 loop_movables_free (movables)
2302 struct loop_movables *movables;
2303 {
2304 struct movable *m;
2305 struct movable *m_next;
2306
2307 for (m = movables->head; m; m = m_next)
2308 {
2309 m_next = m->next;
2310 free (m);
2311 }
2312 }
2313 \f
2314 #if 0
2315 /* Scan X and replace the address of any MEM in it with ADDR.
2316 REG is the address that MEM should have before the replacement. */
2317
2318 static void
2319 replace_call_address (x, reg, addr)
2320 rtx x, reg, addr;
2321 {
2322 enum rtx_code code;
2323 int i;
2324 const char *fmt;
2325
2326 if (x == 0)
2327 return;
2328 code = GET_CODE (x);
2329 switch (code)
2330 {
2331 case PC:
2332 case CC0:
2333 case CONST_INT:
2334 case CONST_DOUBLE:
2335 case CONST:
2336 case SYMBOL_REF:
2337 case LABEL_REF:
2338 case REG:
2339 return;
2340
2341 case SET:
2342 /* Short cut for very common case. */
2343 replace_call_address (XEXP (x, 1), reg, addr);
2344 return;
2345
2346 case CALL:
2347 /* Short cut for very common case. */
2348 replace_call_address (XEXP (x, 0), reg, addr);
2349 return;
2350
2351 case MEM:
2352 /* If this MEM uses a reg other than the one we expected,
2353 something is wrong. */
2354 if (XEXP (x, 0) != reg)
2355 abort ();
2356 XEXP (x, 0) = addr;
2357 return;
2358
2359 default:
2360 break;
2361 }
2362
2363 fmt = GET_RTX_FORMAT (code);
2364 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2365 {
2366 if (fmt[i] == 'e')
2367 replace_call_address (XEXP (x, i), reg, addr);
2368 else if (fmt[i] == 'E')
2369 {
2370 int j;
2371 for (j = 0; j < XVECLEN (x, i); j++)
2372 replace_call_address (XVECEXP (x, i, j), reg, addr);
2373 }
2374 }
2375 }
2376 #endif
2377 \f
2378 /* Return the number of memory refs to addresses that vary
2379 in the rtx X. */
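/* For example, (mem (plus (reg i) (const_int 4))) counts as one nonfixed
   read when (reg i) is modified inside the loop; a varying MEM nested in
   the address of another MEM is counted as well by the recursion.  */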
2380
2381 static int
2382 count_nonfixed_reads (loop, x)
2383 const struct loop *loop;
2384 rtx x;
2385 {
2386 enum rtx_code code;
2387 int i;
2388 const char *fmt;
2389 int value;
2390
2391 if (x == 0)
2392 return 0;
2393
2394 code = GET_CODE (x);
2395 switch (code)
2396 {
2397 case PC:
2398 case CC0:
2399 case CONST_INT:
2400 case CONST_DOUBLE:
2401 case CONST:
2402 case SYMBOL_REF:
2403 case LABEL_REF:
2404 case REG:
2405 return 0;
2406
2407 case MEM:
2408 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2409 + count_nonfixed_reads (loop, XEXP (x, 0)));
2410
2411 default:
2412 break;
2413 }
2414
2415 value = 0;
2416 fmt = GET_RTX_FORMAT (code);
2417 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2418 {
2419 if (fmt[i] == 'e')
2420 value += count_nonfixed_reads (loop, XEXP (x, i));
2421 if (fmt[i] == 'E')
2422 {
2423 int j;
2424 for (j = 0; j < XVECLEN (x, i); j++)
2425 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2426 }
2427 }
2428 return value;
2429 }
2430 \f
2431 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2432 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2433 `unknown_address_altered', `unknown_constant_address_altered', and
2434 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2435 list `store_mems' in LOOP. */
2436
2437 static void
2438 prescan_loop (loop)
2439 struct loop *loop;
2440 {
2441 int level = 1;
2442 rtx insn;
2443 struct loop_info *loop_info = LOOP_INFO (loop);
2444 rtx start = loop->start;
2445 rtx end = loop->end;
2446 /* The label after END. Jumping here is just like falling off the
2447 end of the loop. We use next_nonnote_insn instead of next_label
2448 as a hedge against the (pathological) case where some actual insn
2449 might end up between the two. */
2450 rtx exit_target = next_nonnote_insn (end);
2451
2452 loop_info->has_indirect_jump = indirect_jump_in_function;
2453 loop_info->pre_header_has_call = 0;
2454 loop_info->has_call = 0;
2455 loop_info->has_nonconst_call = 0;
2456 loop_info->has_prefetch = 0;
2457 loop_info->has_volatile = 0;
2458 loop_info->has_tablejump = 0;
2459 loop_info->has_multiple_exit_targets = 0;
2460 loop->level = 1;
2461
2462 loop_info->unknown_address_altered = 0;
2463 loop_info->unknown_constant_address_altered = 0;
2464 loop_info->store_mems = NULL_RTX;
2465 loop_info->first_loop_store_insn = NULL_RTX;
2466 loop_info->mems_idx = 0;
2467 loop_info->num_mem_sets = 0;
2468
2469
2470 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2471 insn = PREV_INSN (insn))
2472 {
2473 if (GET_CODE (insn) == CALL_INSN)
2474 {
2475 loop_info->pre_header_has_call = 1;
2476 break;
2477 }
2478 }
2479
2480 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2481 insn = NEXT_INSN (insn))
2482 {
2483 switch (GET_CODE (insn))
2484 {
2485 case NOTE:
2486 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2487 {
2488 ++level;
2489 /* Count number of loops contained in this one. */
2490 loop->level++;
2491 }
2492 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2493 --level;
2494 break;
2495
2496 case CALL_INSN:
2497 if (! CONST_OR_PURE_CALL_P (insn))
2498 {
2499 loop_info->unknown_address_altered = 1;
2500 loop_info->has_nonconst_call = 1;
2501 }
2502 else if (pure_call_p (insn))
2503 loop_info->has_nonconst_call = 1;
2504 loop_info->has_call = 1;
2505 if (can_throw_internal (insn))
2506 loop_info->has_multiple_exit_targets = 1;
2507 break;
2508
2509 case JUMP_INSN:
2510 if (! loop_info->has_multiple_exit_targets)
2511 {
2512 rtx set = pc_set (insn);
2513
2514 if (set)
2515 {
2516 rtx src = SET_SRC (set);
2517 rtx label1, label2;
2518
2519 if (GET_CODE (src) == IF_THEN_ELSE)
2520 {
2521 label1 = XEXP (src, 1);
2522 label2 = XEXP (src, 2);
2523 }
2524 else
2525 {
2526 label1 = src;
2527 label2 = NULL_RTX;
2528 }
2529
2530 do
2531 {
2532 if (label1 && label1 != pc_rtx)
2533 {
2534 if (GET_CODE (label1) != LABEL_REF)
2535 {
2536 /* Something tricky. */
2537 loop_info->has_multiple_exit_targets = 1;
2538 break;
2539 }
2540 else if (XEXP (label1, 0) != exit_target
2541 && LABEL_OUTSIDE_LOOP_P (label1))
2542 {
2543 /* A jump outside the current loop. */
2544 loop_info->has_multiple_exit_targets = 1;
2545 break;
2546 }
2547 }
2548
2549 label1 = label2;
2550 label2 = NULL_RTX;
2551 }
2552 while (label1);
2553 }
2554 else
2555 {
2556 /* A return, or something tricky. */
2557 loop_info->has_multiple_exit_targets = 1;
2558 }
2559 }
2560 /* FALLTHRU */
2561
2562 case INSN:
2563 if (volatile_refs_p (PATTERN (insn)))
2564 loop_info->has_volatile = 1;
2565
2566 if (GET_CODE (insn) == JUMP_INSN
2567 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2568 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2569 loop_info->has_tablejump = 1;
2570
2571 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2572 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2573 loop_info->first_loop_store_insn = insn;
2574
2575 if (flag_non_call_exceptions && can_throw_internal (insn))
2576 loop_info->has_multiple_exit_targets = 1;
2577 break;
2578
2579 default:
2580 break;
2581 }
2582 }
2583
2584 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2585 if (/* An exception thrown by a called function might land us
2586 anywhere. */
2587 ! loop_info->has_nonconst_call
2588 /* We don't want loads for MEMs moved to a location before the
2589 one at which their stack memory becomes allocated. (Note
2590 that this is not a problem for malloc, etc., since those
2591 require actual function calls.) */
2592 && ! current_function_calls_alloca
2593 /* There are ways to leave the loop other than falling off the
2594 end. */
2595 && ! loop_info->has_multiple_exit_targets)
2596 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2597 insn = NEXT_INSN (insn))
2598 for_each_rtx (&insn, insert_loop_mem, loop_info);
2599
2600 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2601 that loop_invariant_p and load_mems can use true_dependence
2602 to determine what is really clobbered. */
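/* (A MEM in BLKmode stands for "all of memory"; note_addr_stored below
   records BLKmode stores the same way.)  */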
2603 if (loop_info->unknown_address_altered)
2604 {
2605 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2606
2607 loop_info->store_mems
2608 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2609 }
2610 if (loop_info->unknown_constant_address_altered)
2611 {
2612 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2613
2614 RTX_UNCHANGING_P (mem) = 1;
2615 loop_info->store_mems
2616 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2617 }
2618 }
2619 \f
2620 /* Invalidate all loops containing LABEL. */
2621
2622 static void
2623 invalidate_loops_containing_label (label)
2624 rtx label;
2625 {
2626 struct loop *loop;
2627 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2628 loop->invalid = 1;
2629 }
2630
2631 /* Scan the function looking for loops. Record the start and end of each loop.
2632 Also mark as invalid loops any loops that contain a setjmp or are branched
2633 to from outside the loop. */
2634
2635 static void
2636 find_and_verify_loops (f, loops)
2637 rtx f;
2638 struct loops *loops;
2639 {
2640 rtx insn;
2641 rtx label;
2642 int num_loops;
2643 struct loop *current_loop;
2644 struct loop *next_loop;
2645 struct loop *loop;
2646
2647 num_loops = loops->num;
2648
2649 compute_luids (f, NULL_RTX, 0);
2650
2651 /* If there are jumps to undefined labels,
2652 treat them as jumps out of any/all loops.
2653 This also avoids writing past end of tables when there are no loops. */
2654 uid_loop[0] = NULL;
2655
2656 /* Find boundaries of loops, mark which loops are contained within
2657 loops, and invalidate loops that have setjmp. */
2658
2659 num_loops = 0;
2660 current_loop = NULL;
2661 for (insn = f; insn; insn = NEXT_INSN (insn))
2662 {
2663 if (GET_CODE (insn) == NOTE)
2664 switch (NOTE_LINE_NUMBER (insn))
2665 {
2666 case NOTE_INSN_LOOP_BEG:
2667 next_loop = loops->array + num_loops;
2668 next_loop->num = num_loops;
2669 num_loops++;
2670 next_loop->start = insn;
2671 next_loop->outer = current_loop;
2672 current_loop = next_loop;
2673 break;
2674
2675 case NOTE_INSN_LOOP_CONT:
2676 current_loop->cont = insn;
2677 break;
2678
2679 case NOTE_INSN_LOOP_VTOP:
2680 current_loop->vtop = insn;
2681 break;
2682
2683 case NOTE_INSN_LOOP_END:
2684 if (! current_loop)
2685 abort ();
2686
2687 current_loop->end = insn;
2688 current_loop = current_loop->outer;
2689 break;
2690
2691 default:
2692 break;
2693 }
2694
2695 if (GET_CODE (insn) == CALL_INSN
2696 && find_reg_note (insn, REG_SETJMP, NULL))
2697 {
2698 /* In this case, we must invalidate our current loop and any
2699 enclosing loop. */
2700 for (loop = current_loop; loop; loop = loop->outer)
2701 {
2702 loop->invalid = 1;
2703 if (loop_dump_stream)
2704 fprintf (loop_dump_stream,
2705 "\nLoop at %d ignored due to setjmp.\n",
2706 INSN_UID (loop->start));
2707 }
2708 }
2709
2710 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2711 enclosing loop, but this doesn't matter. */
2712 uid_loop[INSN_UID (insn)] = current_loop;
2713 }
2714
2715 /* Any loop containing a label used in an initializer must be invalidated,
2716 because it can be jumped into from anywhere. */
2717 for (label = forced_labels; label; label = XEXP (label, 1))
2718 invalidate_loops_containing_label (XEXP (label, 0));
2719
2720 /* Any loop containing a label used for an exception handler must be
2721 invalidated, because it can be jumped into from anywhere. */
2722 for_each_eh_label (invalidate_loops_containing_label);
2723
2724 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2725 loop that it is not contained within, that loop is marked invalid.
2726 If any INSN or CALL_INSN uses a label's address, then the loop containing
2727 that label is marked invalid, because it could be jumped into from
2728 anywhere.
2729
2730 Also look for blocks of code ending in an unconditional branch that
2731 exits the loop. If such a block is surrounded by a conditional
2732 branch around the block, move the block elsewhere (see below) and
2733 invert the jump to point to the code block. This may eliminate a
2734 label in our loop and will simplify processing by both us and a
2735 possible second cse pass. */
2736
2737 for (insn = f; insn; insn = NEXT_INSN (insn))
2738 if (INSN_P (insn))
2739 {
2740 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2741
2742 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2743 {
2744 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2745 if (note)
2746 invalidate_loops_containing_label (XEXP (note, 0));
2747 }
2748
2749 if (GET_CODE (insn) != JUMP_INSN)
2750 continue;
2751
2752 mark_loop_jump (PATTERN (insn), this_loop);
2753
2754 /* See if this is an unconditional branch outside the loop. */
2755 if (this_loop
2756 && (GET_CODE (PATTERN (insn)) == RETURN
2757 || (any_uncondjump_p (insn)
2758 && onlyjump_p (insn)
2759 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2760 != this_loop)))
2761 && get_max_uid () < max_uid_for_loop)
2762 {
2763 rtx p;
2764 rtx our_next = next_real_insn (insn);
2765 rtx last_insn_to_move = NEXT_INSN (insn);
2766 struct loop *dest_loop;
2767 struct loop *outer_loop = NULL;
2768
2769 /* Go backwards until we reach the start of the loop, a label,
2770 or a JUMP_INSN. */
2771 for (p = PREV_INSN (insn);
2772 GET_CODE (p) != CODE_LABEL
2773 && ! (GET_CODE (p) == NOTE
2774 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2775 && GET_CODE (p) != JUMP_INSN;
2776 p = PREV_INSN (p))
2777 ;
2778
2779 /* Check for the case where we have a jump to an inner nested
2780 loop, and do not perform the optimization in that case. */
2781
2782 if (JUMP_LABEL (insn))
2783 {
2784 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2785 if (dest_loop)
2786 {
2787 for (outer_loop = dest_loop; outer_loop;
2788 outer_loop = outer_loop->outer)
2789 if (outer_loop == this_loop)
2790 break;
2791 }
2792 }
2793
2794 /* Make sure that the target of P is within the current loop. */
2795
2796 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2797 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2798 outer_loop = this_loop;
2799
2800 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2801 we have a block of code to try to move.
2802
2803 We look backward and then forward from the target of INSN
2804 to find a BARRIER at the same loop depth as the target.
2805 If we find such a BARRIER, we make a new label for the start
2806 of the block, invert the jump in P and point it to that label,
2807 and move the block of code to the spot we found. */
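/* Schematically (a rough sketch of the typical shape, not an exhaustive
   description):

	p:	if (cond) jump AFTER
		<block>
	insn:	jump EXIT
		barrier
	AFTER:	...

   becomes, with the block reordered to sit after a BARRIER outside the
   loop:

	p:	if (! cond) jump NEW_LABEL
	AFTER:	...
		...
	NEW_LABEL:
		<block>
	insn:	jump EXIT
		barrier  */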
2808
2809 if (! outer_loop
2810 && GET_CODE (p) == JUMP_INSN
2811 && JUMP_LABEL (p) != 0
2812 /* Just ignore jumps to labels that were never emitted.
2813 These always indicate compilation errors. */
2814 && INSN_UID (JUMP_LABEL (p)) != 0
2815 && any_condjump_p (p) && onlyjump_p (p)
2816 && next_real_insn (JUMP_LABEL (p)) == our_next
2817 /* If it's not safe to move the sequence, then we
2818 mustn't try. */
2819 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2820 &last_insn_to_move))
2821 {
2822 rtx target
2823 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2824 struct loop *target_loop = uid_loop[INSN_UID (target)];
2825 rtx loc, loc2;
2826 rtx tmp;
2827
2828 /* Search for possible garbage past the conditional jumps
2829 and look for the last barrier. */
2830 for (tmp = last_insn_to_move;
2831 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2832 if (GET_CODE (tmp) == BARRIER)
2833 last_insn_to_move = tmp;
2834
2835 for (loc = target; loc; loc = PREV_INSN (loc))
2836 if (GET_CODE (loc) == BARRIER
2837 /* Don't move things inside a tablejump. */
2838 && ((loc2 = next_nonnote_insn (loc)) == 0
2839 || GET_CODE (loc2) != CODE_LABEL
2840 || (loc2 = next_nonnote_insn (loc2)) == 0
2841 || GET_CODE (loc2) != JUMP_INSN
2842 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2843 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2844 && uid_loop[INSN_UID (loc)] == target_loop)
2845 break;
2846
2847 if (loc == 0)
2848 for (loc = target; loc; loc = NEXT_INSN (loc))
2849 if (GET_CODE (loc) == BARRIER
2850 /* Don't move things inside a tablejump. */
2851 && ((loc2 = next_nonnote_insn (loc)) == 0
2852 || GET_CODE (loc2) != CODE_LABEL
2853 || (loc2 = next_nonnote_insn (loc2)) == 0
2854 || GET_CODE (loc2) != JUMP_INSN
2855 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2856 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2857 && uid_loop[INSN_UID (loc)] == target_loop)
2858 break;
2859
2860 if (loc)
2861 {
2862 rtx cond_label = JUMP_LABEL (p);
2863 rtx new_label = get_label_after (p);
2864
2865 /* Ensure our label doesn't go away. */
2866 LABEL_NUSES (cond_label)++;
2867
2868 /* Verify that uid_loop is large enough and that
2869 we can invert P. */
2870 if (invert_jump (p, new_label, 1))
2871 {
2872 rtx q, r;
2873
2874 /* If no suitable BARRIER was found, create a suitable
2875 one before TARGET. Since TARGET is a fall through
2876 path, we'll need to insert a jump around our block
2877 and add a BARRIER before TARGET.
2878
2879 This creates an extra unconditional jump outside
2880 the loop. However, the benefits of removing rarely
2881 executed instructions from inside the loop usually
2882 outweigh the cost of the extra unconditional jump
2883 outside the loop. */
2884 if (loc == 0)
2885 {
2886 rtx temp;
2887
2888 temp = gen_jump (JUMP_LABEL (insn));
2889 temp = emit_jump_insn_before (temp, target);
2890 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2891 LABEL_NUSES (JUMP_LABEL (insn))++;
2892 loc = emit_barrier_before (target);
2893 }
2894
2895 /* Include the BARRIER after INSN and copy the
2896 block after LOC. */
2897 if (squeeze_notes (&new_label, &last_insn_to_move))
2898 abort ();
2899 reorder_insns (new_label, last_insn_to_move, loc);
2900
2901 /* All those insns are now in TARGET_LOOP. */
2902 for (q = new_label;
2903 q != NEXT_INSN (last_insn_to_move);
2904 q = NEXT_INSN (q))
2905 uid_loop[INSN_UID (q)] = target_loop;
2906
2907 /* The label jumped to by INSN is no longer a loop
2908 exit. Unless INSN does not have a label (e.g.,
2909 it is a RETURN insn), search loop->exit_labels
2910 to find its label_ref, and remove it. Also turn
2911 off LABEL_OUTSIDE_LOOP_P bit. */
2912 if (JUMP_LABEL (insn))
2913 {
2914 for (q = 0, r = this_loop->exit_labels;
2915 r;
2916 q = r, r = LABEL_NEXTREF (r))
2917 if (XEXP (r, 0) == JUMP_LABEL (insn))
2918 {
2919 LABEL_OUTSIDE_LOOP_P (r) = 0;
2920 if (q)
2921 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2922 else
2923 this_loop->exit_labels = LABEL_NEXTREF (r);
2924 break;
2925 }
2926
2927 for (loop = this_loop; loop && loop != target_loop;
2928 loop = loop->outer)
2929 loop->exit_count--;
2930
2931 /* If we didn't find it, then something is
2932 wrong. */
2933 if (! r)
2934 abort ();
2935 }
2936
2937 /* P is now a jump outside the loop, so it must be put
2938 in loop->exit_labels, and marked as such.
2939 The easiest way to do this is to just call
2940 mark_loop_jump again for P. */
2941 mark_loop_jump (PATTERN (p), this_loop);
2942
2943 /* If INSN now jumps to the insn after it,
2944 delete INSN. */
2945 if (JUMP_LABEL (insn) != 0
2946 && (next_real_insn (JUMP_LABEL (insn))
2947 == next_real_insn (insn)))
2948 delete_related_insns (insn);
2949 }
2950
2951 /* Continue the loop after where the conditional
2952 branch used to jump, since the only branch insn
2953 in the block (if it still remains) is an inter-loop
2954 branch and hence needs no processing. */
2955 insn = NEXT_INSN (cond_label);
2956
2957 if (--LABEL_NUSES (cond_label) == 0)
2958 delete_related_insns (cond_label);
2959
2960 /* This loop will be continued with NEXT_INSN (insn). */
2961 insn = PREV_INSN (insn);
2962 }
2963 }
2964 }
2965 }
2966 }
2967
2968 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2969 loops it is contained in, mark the target loop invalid.
2970
2971 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2972
2973 static void
2974 mark_loop_jump (x, loop)
2975 rtx x;
2976 struct loop *loop;
2977 {
2978 struct loop *dest_loop;
2979 struct loop *outer_loop;
2980 int i;
2981
2982 switch (GET_CODE (x))
2983 {
2984 case PC:
2985 case USE:
2986 case CLOBBER:
2987 case REG:
2988 case MEM:
2989 case CONST_INT:
2990 case CONST_DOUBLE:
2991 case RETURN:
2992 return;
2993
2994 case CONST:
2995 /* There could be a label reference in here. */
2996 mark_loop_jump (XEXP (x, 0), loop);
2997 return;
2998
2999 case PLUS:
3000 case MINUS:
3001 case MULT:
3002 mark_loop_jump (XEXP (x, 0), loop);
3003 mark_loop_jump (XEXP (x, 1), loop);
3004 return;
3005
3006 case LO_SUM:
3007 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3008 mark_loop_jump (XEXP (x, 1), loop);
3009 return;
3010
3011 case SIGN_EXTEND:
3012 case ZERO_EXTEND:
3013 mark_loop_jump (XEXP (x, 0), loop);
3014 return;
3015
3016 case LABEL_REF:
3017 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3018
3019 /* Link together all labels that branch outside the loop. This
3020 is used by final_[bg]iv_value and the loop unrolling code. Also
3021 mark this LABEL_REF so we know that this branch should predict
3022 false. */
3023
3024 /* A check to make sure the label is not in an inner nested loop,
3025 since this does not count as a loop exit. */
3026 if (dest_loop)
3027 {
3028 for (outer_loop = dest_loop; outer_loop;
3029 outer_loop = outer_loop->outer)
3030 if (outer_loop == loop)
3031 break;
3032 }
3033 else
3034 outer_loop = NULL;
3035
3036 if (loop && ! outer_loop)
3037 {
3038 LABEL_OUTSIDE_LOOP_P (x) = 1;
3039 LABEL_NEXTREF (x) = loop->exit_labels;
3040 loop->exit_labels = x;
3041
3042 for (outer_loop = loop;
3043 outer_loop && outer_loop != dest_loop;
3044 outer_loop = outer_loop->outer)
3045 outer_loop->exit_count++;
3046 }
3047
3048 /* If this is inside a loop, but not in the current loop or one enclosed
3049 by it, it invalidates at least one loop. */
3050
3051 if (! dest_loop)
3052 return;
3053
3054 /* We must invalidate every nested loop containing the target of this
3055 label, except those that also contain the jump insn. */
3056
3057 for (; dest_loop; dest_loop = dest_loop->outer)
3058 {
3059 /* Stop when we reach a loop that also contains the jump insn. */
3060 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3061 if (dest_loop == outer_loop)
3062 return;
3063
3064 /* If we get here, we know we need to invalidate a loop. */
3065 if (loop_dump_stream && ! dest_loop->invalid)
3066 fprintf (loop_dump_stream,
3067 "\nLoop at %d ignored due to multiple entry points.\n",
3068 INSN_UID (dest_loop->start));
3069
3070 dest_loop->invalid = 1;
3071 }
3072 return;
3073
3074 case SET:
3075 /* If this is not setting pc, ignore. */
3076 if (SET_DEST (x) == pc_rtx)
3077 mark_loop_jump (SET_SRC (x), loop);
3078 return;
3079
3080 case IF_THEN_ELSE:
3081 mark_loop_jump (XEXP (x, 1), loop);
3082 mark_loop_jump (XEXP (x, 2), loop);
3083 return;
3084
3085 case PARALLEL:
3086 case ADDR_VEC:
3087 for (i = 0; i < XVECLEN (x, 0); i++)
3088 mark_loop_jump (XVECEXP (x, 0, i), loop);
3089 return;
3090
3091 case ADDR_DIFF_VEC:
3092 for (i = 0; i < XVECLEN (x, 1); i++)
3093 mark_loop_jump (XVECEXP (x, 1, i), loop);
3094 return;
3095
3096 default:
3097 /* Strictly speaking this is not a jump into the loop, only a possible
3098 jump out of the loop. However, we have no way to link the destination
3099 of this jump onto the list of exit labels. To be safe we mark this
3100 loop and any containing loops as invalid. */
3101 if (loop)
3102 {
3103 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3104 {
3105 if (loop_dump_stream && ! outer_loop->invalid)
3106 fprintf (loop_dump_stream,
3107 "\nLoop at %d ignored due to unknown exit jump.\n",
3108 INSN_UID (outer_loop->start));
3109 outer_loop->invalid = 1;
3110 }
3111 }
3112 return;
3113 }
3114 }
3115 \f
3116 /* Return nonzero if there is a label in the range from
3117 insn INSN to and including the insn whose luid is END.
3118 INSN must have an assigned luid (i.e., it must not have
3119 been previously created by loop.c). */
3120
3121 static int
3122 labels_in_range_p (insn, end)
3123 rtx insn;
3124 int end;
3125 {
3126 while (insn && INSN_LUID (insn) <= end)
3127 {
3128 if (GET_CODE (insn) == CODE_LABEL)
3129 return 1;
3130 insn = NEXT_INSN (insn);
3131 }
3132
3133 return 0;
3134 }
3135
3136 /* Record that a memory reference X is being set. */
3137
3138 static void
3139 note_addr_stored (x, y, data)
3140 rtx x;
3141 rtx y ATTRIBUTE_UNUSED;
3142 void *data ATTRIBUTE_UNUSED;
3143 {
3144 struct loop_info *loop_info = data;
3145
3146 if (x == 0 || GET_CODE (x) != MEM)
3147 return;
3148
3149 /* Count number of memory writes.
3150 This affects heuristics in strength_reduce. */
3151 loop_info->num_mem_sets++;
3152
3153 /* BLKmode MEM means all memory is clobbered. */
3154 if (GET_MODE (x) == BLKmode)
3155 {
3156 if (RTX_UNCHANGING_P (x))
3157 loop_info->unknown_constant_address_altered = 1;
3158 else
3159 loop_info->unknown_address_altered = 1;
3160
3161 return;
3162 }
3163
3164 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3165 loop_info->store_mems);
3166 }
3167
3168 /* X is a value modified by an INSN that references a biv inside a loop
3169 exit test (i.e., X is somehow related to the value of the biv). If X
3170 is a pseudo that is used more than once, then the biv is (effectively)
3171 used more than once. DATA is a pointer to a loop_regs structure. */
3172
3173 static void
3174 note_set_pseudo_multiple_uses (x, y, data)
3175 rtx x;
3176 rtx y ATTRIBUTE_UNUSED;
3177 void *data;
3178 {
3179 struct loop_regs *regs = (struct loop_regs *) data;
3180
3181 if (x == 0)
3182 return;
3183
3184 while (GET_CODE (x) == STRICT_LOW_PART
3185 || GET_CODE (x) == SIGN_EXTRACT
3186 || GET_CODE (x) == ZERO_EXTRACT
3187 || GET_CODE (x) == SUBREG)
3188 x = XEXP (x, 0);
3189
3190 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3191 return;
3192
3193 /* If we do not have usage information, or if we know the register
3194 is used more than once, note that fact for check_dbra_loop. */
3195 if (REGNO (x) >= max_reg_before_loop
3196 || ! regs->array[REGNO (x)].single_usage
3197 || regs->array[REGNO (x)].single_usage == const0_rtx)
3198 regs->multiple_uses = 1;
3199 }
3200 \f
3201 /* Return nonzero if the rtx X is invariant over the current loop.
3202
3203 The value is 2 if we refer to something only conditionally invariant.
3204
3205 A memory ref is invariant if it is not volatile and does not conflict
3206 with anything stored in `loop_info->store_mems'. */
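/* Note: a register whose set_in_loop count is negative (it was recorded
   as a candidate for being moved out of the loop rather than as an
   ordinary set; compare the -2 test in rtx_equal_for_loop_p) produces
   the "conditionally invariant" answer 2 below.  */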
3207
3208 int
3209 loop_invariant_p (loop, x)
3210 const struct loop *loop;
3211 rtx x;
3212 {
3213 struct loop_info *loop_info = LOOP_INFO (loop);
3214 struct loop_regs *regs = LOOP_REGS (loop);
3215 int i;
3216 enum rtx_code code;
3217 const char *fmt;
3218 int conditional = 0;
3219 rtx mem_list_entry;
3220
3221 if (x == 0)
3222 return 1;
3223 code = GET_CODE (x);
3224 switch (code)
3225 {
3226 case CONST_INT:
3227 case CONST_DOUBLE:
3228 case SYMBOL_REF:
3229 case CONST:
3230 return 1;
3231
3232 case LABEL_REF:
3233 /* A LABEL_REF is normally invariant, however, if we are unrolling
3234 loops, and this label is inside the loop, then it isn't invariant.
3235 This is because each unrolled copy of the loop body will have
3236 a copy of this label. If this was invariant, then an insn loading
3237 the address of this label into a register might get moved outside
3238 the loop, and then each loop body would end up using the same label.
3239
3240 We don't know the loop bounds here though, so just fail for all
3241 labels. */
3242 if (flag_unroll_loops)
3243 return 0;
3244 else
3245 return 1;
3246
3247 case PC:
3248 case CC0:
3249 case UNSPEC_VOLATILE:
3250 return 0;
3251
3252 case REG:
3253 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3254 since the reg might be set by initialization within the loop. */
3255
3256 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3257 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3258 && ! current_function_has_nonlocal_goto)
3259 return 1;
3260
3261 if (LOOP_INFO (loop)->has_call
3262 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3263 return 0;
3264
3265 if (regs->array[REGNO (x)].set_in_loop < 0)
3266 return 2;
3267
3268 return regs->array[REGNO (x)].set_in_loop == 0;
3269
3270 case MEM:
3271 /* Volatile memory references must be rejected. Do this before
3272 checking for read-only items, so that volatile read-only items
3273 will be rejected also. */
3274 if (MEM_VOLATILE_P (x))
3275 return 0;
3276
3277 /* See if there is any dependence between a store and this load. */
3278 mem_list_entry = loop_info->store_mems;
3279 while (mem_list_entry)
3280 {
3281 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3282 x, rtx_varies_p))
3283 return 0;
3284
3285 mem_list_entry = XEXP (mem_list_entry, 1);
3286 }
3287
3288 /* It's not invalidated by a store in memory
3289 but we must still verify the address is invariant. */
3290 break;
3291
3292 case ASM_OPERANDS:
3293 /* Don't mess with insns declared volatile. */
3294 if (MEM_VOLATILE_P (x))
3295 return 0;
3296 break;
3297
3298 default:
3299 break;
3300 }
3301
3302 fmt = GET_RTX_FORMAT (code);
3303 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3304 {
3305 if (fmt[i] == 'e')
3306 {
3307 int tem = loop_invariant_p (loop, XEXP (x, i));
3308 if (tem == 0)
3309 return 0;
3310 if (tem == 2)
3311 conditional = 1;
3312 }
3313 else if (fmt[i] == 'E')
3314 {
3315 int j;
3316 for (j = 0; j < XVECLEN (x, i); j++)
3317 {
3318 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3319 if (tem == 0)
3320 return 0;
3321 if (tem == 2)
3322 conditional = 1;
3323 }
3324
3325 }
3326 }
3327
3328 return 1 + conditional;
3329 }
3330 \f
3331 /* Return nonzero if all the insns in the loop that set REG
3332 are INSN and the immediately following insns,
3333 and if each of those insns sets REG in an invariant way
3334 (not counting uses of REG in them).
3335
3336 The value is 2 if some of these insns are only conditionally invariant.
3337
3338 We assume that INSN itself is the first set of REG
3339 and that its source is invariant. */
3340
3341 static int
3342 consec_sets_invariant_p (loop, reg, n_sets, insn)
3343 const struct loop *loop;
3344 int n_sets;
3345 rtx reg, insn;
3346 {
3347 struct loop_regs *regs = LOOP_REGS (loop);
3348 rtx p = insn;
3349 unsigned int regno = REGNO (reg);
3350 rtx temp;
3351 /* Number of sets we have to insist on finding after INSN. */
3352 int count = n_sets - 1;
3353 int old = regs->array[regno].set_in_loop;
3354 int value = 0;
3355 int this;
3356
3357 /* If N_SETS hit the limit, we can't rely on its value. */
3358 if (n_sets == 127)
3359 return 0;
3360
3361 regs->array[regno].set_in_loop = 0;
3362
3363 while (count > 0)
3364 {
3365 enum rtx_code code;
3366 rtx set;
3367
3368 p = NEXT_INSN (p);
3369 code = GET_CODE (p);
3370
3371 /* If library call, skip to end of it. */
3372 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3373 p = XEXP (temp, 0);
3374
3375 this = 0;
3376 if (code == INSN
3377 && (set = single_set (p))
3378 && GET_CODE (SET_DEST (set)) == REG
3379 && REGNO (SET_DEST (set)) == regno)
3380 {
3381 this = loop_invariant_p (loop, SET_SRC (set));
3382 if (this != 0)
3383 value |= this;
3384 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3385 {
3386 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3387 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3388 notes are OK. */
3389 this = (CONSTANT_P (XEXP (temp, 0))
3390 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3391 && loop_invariant_p (loop, XEXP (temp, 0))));
3392 if (this != 0)
3393 value |= this;
3394 }
3395 }
3396 if (this != 0)
3397 count--;
3398 else if (code != NOTE)
3399 {
3400 regs->array[regno].set_in_loop = old;
3401 return 0;
3402 }
3403 }
3404
3405 regs->array[regno].set_in_loop = old;
3406 /* If loop_invariant_p ever returned 2, we return 2. */
3407 return 1 + (value & 2);
3408 }
3409
3410 #if 0
3411 /* I don't think this condition is sufficient to allow INSN
3412 to be moved, so we no longer test it. */
3413
3414 /* Return 1 if all insns in the basic block of INSN and following INSN
3415 that set REG are invariant according to TABLE. */
3416
3417 static int
3418 all_sets_invariant_p (reg, insn, table)
3419 rtx reg, insn;
3420 short *table;
3421 {
3422 rtx p = insn;
3423 int regno = REGNO (reg);
3424
3425 while (1)
3426 {
3427 enum rtx_code code;
3428 p = NEXT_INSN (p);
3429 code = GET_CODE (p);
3430 if (code == CODE_LABEL || code == JUMP_INSN)
3431 return 1;
3432 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3433 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3434 && REGNO (SET_DEST (PATTERN (p))) == regno)
3435 {
3436 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3437 return 0;
3438 }
3439 }
3440 }
3441 #endif /* 0 */
3442 \f
3443 /* Look at all uses (not sets) of registers in X. For each, if it is
3444 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3445 a different insn, set USAGE[REGNO] to const0_rtx. */
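/* After the whole loop body has been scanned, single_usage for a register
   is therefore 0 if it was never used, the one insn that uses it if all
   of its uses are in a single insn, and const0_rtx if it is used by two
   or more different insns.  */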
3446
3447 static void
3448 find_single_use_in_loop (regs, insn, x)
3449 struct loop_regs *regs;
3450 rtx insn;
3451 rtx x;
3452 {
3453 enum rtx_code code = GET_CODE (x);
3454 const char *fmt = GET_RTX_FORMAT (code);
3455 int i, j;
3456
3457 if (code == REG)
3458 regs->array[REGNO (x)].single_usage
3459 = (regs->array[REGNO (x)].single_usage != 0
3460 && regs->array[REGNO (x)].single_usage != insn)
3461 ? const0_rtx : insn;
3462
3463 else if (code == SET)
3464 {
3465 /* Don't count SET_DEST if it is a REG; otherwise count things
3466 in SET_DEST because if a register is partially modified, it won't
3467 show up as a potential movable so we don't care how USAGE is set
3468 for it. */
3469 if (GET_CODE (SET_DEST (x)) != REG)
3470 find_single_use_in_loop (regs, insn, SET_DEST (x));
3471 find_single_use_in_loop (regs, insn, SET_SRC (x));
3472 }
3473 else
3474 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3475 {
3476 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3477 find_single_use_in_loop (regs, insn, XEXP (x, i));
3478 else if (fmt[i] == 'E')
3479 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3480 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3481 }
3482 }
3483 \f
3484 /* Count and record any set in X which is contained in INSN. Update
3485 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3486 in X. */
3487
3488 static void
3489 count_one_set (regs, insn, x, last_set)
3490 struct loop_regs *regs;
3491 rtx insn, x;
3492 rtx *last_set;
3493 {
3494 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3495 /* Don't move a reg that has an explicit clobber.
3496 It's not worth the pain to try to do it correctly. */
3497 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3498
3499 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3500 {
3501 rtx dest = SET_DEST (x);
3502 while (GET_CODE (dest) == SUBREG
3503 || GET_CODE (dest) == ZERO_EXTRACT
3504 || GET_CODE (dest) == SIGN_EXTRACT
3505 || GET_CODE (dest) == STRICT_LOW_PART)
3506 dest = XEXP (dest, 0);
3507 if (GET_CODE (dest) == REG)
3508 {
3509 int i;
3510 int regno = REGNO (dest);
3511 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3512 {
3513 /* If this is the first setting of this reg
3514 in current basic block, and it was set before,
3515 it must be set in two basic blocks, so it cannot
3516 be moved out of the loop. */
3517 if (regs->array[regno].set_in_loop > 0
3518 && last_set == 0)
3519 regs->array[regno+i].may_not_optimize = 1;
3520 /* If this is not the first setting in the current basic block,
3521 see if the reg was used between the previous setting and this one.
3522 If so, neither one can be moved. */
3523 if (last_set[regno] != 0
3524 && reg_used_between_p (dest, last_set[regno], insn))
3525 regs->array[regno+i].may_not_optimize = 1;
3526 if (regs->array[regno+i].set_in_loop < 127)
3527 ++regs->array[regno+i].set_in_loop;
3528 last_set[regno+i] = insn;
3529 }
3530 }
3531 }
3532 }
3533 \f
3534 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3535 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3536 contained in insn INSN is used by any insn that precedes INSN in
3537 cyclic order starting from the loop entry point.
3538
3539 We don't want to use INSN_LUID here because if we restrict INSN to those
3540 that have a valid INSN_LUID, it means we cannot move an invariant out
3541 from an inner loop past two loops. */
3542
3543 static int
3544 loop_reg_used_before_p (loop, set, insn)
3545 const struct loop *loop;
3546 rtx set, insn;
3547 {
3548 rtx reg = SET_DEST (set);
3549 rtx p;
3550
3551 /* Scan forward checking for register usage. If we hit INSN, we
3552 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3553 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3554 {
3555 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3556 return 1;
3557
3558 if (p == loop->end)
3559 p = loop->start;
3560 }
3561
3562 return 0;
3563 }
3564 \f
3565
3566 /* Information we collect about arrays that we might want to prefetch. */
3567 struct prefetch_info
3568 {
3569 struct iv_class *class; /* Class this prefetch is based on. */
3570 struct induction *giv; /* GIV this prefetch is based on. */
3571 rtx base_address; /* Start prefetching from this address plus
3572 index. */
3573 HOST_WIDE_INT index;
3574 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3575 iteration. */
3576 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3577 prefetch area in one iteration. */
3578 unsigned int total_bytes; /* Total bytes loop will access in this block.
3579 This is set only for loops with known
3580 iteration counts and is 0xffffffff
3581 otherwise. */
3582 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3583 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3584 unsigned int write : 1; /* 1 for read/write prefetches. */
3585 };
3586
3587 /* Data used by check_store function. */
3588 struct check_store_data
3589 {
3590 rtx mem_address;
3591 int mem_write;
3592 };
3593
3594 static void check_store PARAMS ((rtx, rtx, void *));
3595 static void emit_prefetch_instructions PARAMS ((struct loop *));
3596 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3597
3598 /* Set mem_write when mem_address is found. Used as callback to
3599 note_stores. */
3600 static void
3601 check_store (x, pat, data)
3602 rtx x, pat ATTRIBUTE_UNUSED;
3603 void *data;
3604 {
3605 struct check_store_data *d = (struct check_store_data *) data;
3606
3607 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3608 d->mem_write = 1;
3609 }
3610 \f
3611 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3612 important to get some addresses combined. Later more sophisticated
3613 transformations can be added when necessary.
3614
3615 ??? The same trick of swapping operands is done at several other places.
3616 It would be nice to develop some common way to handle this. */
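/* For instance, (plus (reg 100) (reg 101)) is considered equal to
   (plus (reg 101) (reg 100)) here, whereas plain rtx_equal_p would
   reject that pair. */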
3617
3618 static int
3619 rtx_equal_for_prefetch_p (x, y)
3620 rtx x, y;
3621 {
3622 int i;
3623 int j;
3624 enum rtx_code code = GET_CODE (x);
3625 const char *fmt;
3626
3627 if (x == y)
3628 return 1;
3629 if (code != GET_CODE (y))
3630 return 0;
3631
3632 code = GET_CODE (x);
3633
3634 if (GET_RTX_CLASS (code) == 'c')
3635 {
3636 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3637 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3638 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3639 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3640 }
3641 /* Compare the elements. If any pair of corresponding elements fails to
3642 match, return 0 for the whole thing. */
3643
3644 fmt = GET_RTX_FORMAT (code);
3645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3646 {
3647 switch (fmt[i])
3648 {
3649 case 'w':
3650 if (XWINT (x, i) != XWINT (y, i))
3651 return 0;
3652 break;
3653
3654 case 'i':
3655 if (XINT (x, i) != XINT (y, i))
3656 return 0;
3657 break;
3658
3659 case 'E':
3660 /* Two vectors must have the same length. */
3661 if (XVECLEN (x, i) != XVECLEN (y, i))
3662 return 0;
3663
3664 /* And the corresponding elements must match. */
3665 for (j = 0; j < XVECLEN (x, i); j++)
3666 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3667 XVECEXP (y, i, j)) == 0)
3668 return 0;
3669 break;
3670
3671 case 'e':
3672 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3673 return 0;
3674 break;
3675
3676 case 's':
3677 if (strcmp (XSTR (x, i), XSTR (y, i)))
3678 return 0;
3679 break;
3680
3681 case 'u':
3682 /* These are just backpointers, so they don't matter. */
3683 break;
3684
3685 case '0':
3686 break;
3687
3688 /* It is believed that rtx's at this level will never
3689 contain anything but integers and other rtx's,
3690 except for within LABEL_REFs and SYMBOL_REFs. */
3691 default:
3692 abort ();
3693 }
3694 }
3695 return 1;
3696 }
3697 \f
3698 /* Remove constant addition value from the expression X (when present)
3699 and return it. */
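/* Illustrative example: given *X == (plus (reg 100) (const_int 16)),
   this returns 16 and rewrites *X to (reg 100); given a bare
   (const_int 8), it returns 8 and rewrites *X to const0_rtx. */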
3700
3701 static HOST_WIDE_INT
3702 remove_constant_addition (x)
3703 rtx *x;
3704 {
3705 HOST_WIDE_INT addval = 0;
3706 rtx exp = *x;
3707
3708 /* Avoid clobbering a shared CONST expression. */
3709 if (GET_CODE (exp) == CONST)
3710 {
3711 if (GET_CODE (XEXP (exp, 0)) == PLUS
3712 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3713 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3714 {
3715 *x = XEXP (XEXP (exp, 0), 0);
3716 return INTVAL (XEXP (XEXP (exp, 0), 1));
3717 }
3718 return 0;
3719 }
3720
3721 if (GET_CODE (exp) == CONST_INT)
3722 {
3723 addval = INTVAL (exp);
3724 *x = const0_rtx;
3725 }
3726
3727 /* For a PLUS expression, recurse on its operands. */
3728 else if (GET_CODE (exp) == PLUS)
3729 {
3730 addval += remove_constant_addition (&XEXP (exp, 0));
3731 addval += remove_constant_addition (&XEXP (exp, 1));
3732
3733 /* If one of the operands turned out to be constant, remove the extra
3734 zero left behind from the expression. */
3735 if (XEXP (exp, 0) == const0_rtx)
3736 *x = XEXP (exp, 1);
3737 else if (XEXP (exp, 1) == const0_rtx)
3738 *x = XEXP (exp, 0);
3739 }
3740
3741 return addval;
3742 }
3743
3744 /* Attempt to identify accesses to arrays that are most likely to cause cache
3745 misses, and emit prefetch instructions a few prefetch blocks forward.
3746
3747 To detect the arrays we use the GIV information that was collected by the
3748 strength reduction pass.
3749
3750 The prefetch instructions are generated after the GIV information is done
3751 and before the strength reduction process. The new GIVs are injected into
3752 the strength reduction tables, so the prefetch addresses are optimized as
3753 well.
3754
3755 GIVs are split into base address, stride, and constant addition values.
3756 GIVs with the same address, stride and close addition values are combined
3757 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3758 for write instructions can be used for the block we write to, on machines
3759 that support write prefetches.
3760
3761 Several heuristics are used to determine when to prefetch. They are
3762 controlled by defined symbols that can be overridden for each target. */
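/* As a rough illustration (not a specification of the code below): for a
   source loop such as

     for (i = 0; i < n; i++)
       sum += a[i];

   with 4-byte elements, the DEST_ADDR giv for the load from a[i] splits
   into a base address of &a[0], a stride of 4 bytes per iteration and a
   constant index of 0, and the emitted prefetches target addresses a few
   PREFETCH_BLOCKs beyond the current access. */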
3763
3764 static void
3765 emit_prefetch_instructions (loop)
3766 struct loop *loop;
3767 {
3768 int num_prefetches = 0;
3769 int num_real_prefetches = 0;
3770 int num_real_write_prefetches = 0;
3771 int num_prefetches_before = 0;
3772 int num_write_prefetches_before = 0;
3773 int ahead = 0;
3774 int i;
3775 struct iv_class *bl;
3776 struct induction *iv;
3777 struct prefetch_info info[MAX_PREFETCHES];
3778 struct loop_ivs *ivs = LOOP_IVS (loop);
3779
3780 if (!HAVE_prefetch)
3781 return;
3782
3783 /* Consider only loops without calls. When a call is made, the loop is
3784 probably slow enough that the memory reads need no prefetching. */
3785 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3786 {
3787 if (loop_dump_stream)
3788 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3789
3790 return;
3791 }
3792
3793 /* Don't prefetch in loops known to have few iterations. */
3794 if (PREFETCH_NO_LOW_LOOPCNT
3795 && LOOP_INFO (loop)->n_iterations
3796 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3797 {
3798 if (loop_dump_stream)
3799 fprintf (loop_dump_stream,
3800 "Prefetch: ignoring loop: not enough iterations.\n");
3801 return;
3802 }
3803
3804 /* Search all induction variables and pick those interesting for the prefetch
3805 machinery. */
3806 for (bl = ivs->list; bl; bl = bl->next)
3807 {
3808 struct induction *biv = bl->biv, *biv1;
3809 int basestride = 0;
3810
3811 biv1 = biv;
3812
3813 /* Expect all BIVs to be executed in each iteration. This makes our
3814 analysis more conservative. */
3815 while (biv1)
3816 {
3817 /* Discard non-constant additions that we can't handle well yet, and
3818 BIVs that are executed multiple times; such BIVs ought to be
3819 handled in the nested loop. We accept not_every_iteration BIVs,
3820 since these only result in larger strides and make our
3821 heuristics more conservative. */
3822 if (GET_CODE (biv->add_val) != CONST_INT)
3823 {
3824 if (loop_dump_stream)
3825 {
3826 fprintf (loop_dump_stream,
3827 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3828 REGNO (biv->src_reg), INSN_UID (biv->insn));
3829 print_rtl (loop_dump_stream, biv->add_val);
3830 fprintf (loop_dump_stream, "\n");
3831 }
3832 break;
3833 }
3834
3835 if (biv->maybe_multiple)
3836 {
3837 if (loop_dump_stream)
3838 {
3839 fprintf (loop_dump_stream,
3840 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3841 REGNO (biv->src_reg), INSN_UID (biv->insn));
3842 print_rtl (loop_dump_stream, biv->add_val);
3843 fprintf (loop_dump_stream, "\n");
3844 }
3845 break;
3846 }
3847
3848 basestride += INTVAL (biv1->add_val);
3849 biv1 = biv1->next_iv;
3850 }
3851
3852 if (biv1 || !basestride)
3853 continue;
3854
3855 for (iv = bl->giv; iv; iv = iv->next_iv)
3856 {
3857 rtx address;
3858 rtx temp;
3859 HOST_WIDE_INT index = 0;
3860 int add = 1;
3861 HOST_WIDE_INT stride = 0;
3862 int stride_sign = 1;
3863 struct check_store_data d;
3864 const char *ignore_reason = NULL;
3865 int size = GET_MODE_SIZE (GET_MODE (iv));
3866
3867 /* See whether an induction variable is interesting to us and if
3868 not, report the reason. */
3869 if (iv->giv_type != DEST_ADDR)
3870 ignore_reason = "giv is not a destination address";
3871
3872 /* We are interested only in constant stride memory references
3873 in order to be able to compute density easily. */
3874 else if (GET_CODE (iv->mult_val) != CONST_INT)
3875 ignore_reason = "stride is not constant";
3876
3877 else
3878 {
3879 stride = INTVAL (iv->mult_val) * basestride;
3880 if (stride < 0)
3881 {
3882 stride = -stride;
3883 stride_sign = -1;
3884 }
3885
3886 /* On some targets, reversed order prefetches are not
3887 worthwhile. */
3888 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3889 ignore_reason = "reversed order stride";
3890
3891 /* Prefetch of accesses with an extreme stride might not be
3892 worthwhile, either. */
3893 else if (PREFETCH_NO_EXTREME_STRIDE
3894 && stride > PREFETCH_EXTREME_STRIDE)
3895 ignore_reason = "extreme stride";
3896
3897 /* Ignore GIVs with varying add values; we can't predict the
3898 value for the next iteration. */
3899 else if (!loop_invariant_p (loop, iv->add_val))
3900 ignore_reason = "giv has varying add value";
3901
3902 /* Ignore GIVs in the nested loops; they ought to have been
3903 handled already. */
3904 else if (iv->maybe_multiple)
3905 ignore_reason = "giv is in nested loop";
3906 }
3907
3908 if (ignore_reason != NULL)
3909 {
3910 if (loop_dump_stream)
3911 fprintf (loop_dump_stream,
3912 "Prefetch: ignoring giv at %d: %s.\n",
3913 INSN_UID (iv->insn), ignore_reason);
3914 continue;
3915 }
3916
3917 /* Determine the pointer to the basic array we are examining. It is
3918 the sum of the BIV's initial value and the GIV's add_val. */
3919 address = copy_rtx (iv->add_val);
3920 temp = copy_rtx (bl->initial_value);
3921
3922 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3923 index = remove_constant_addition (&address);
3924
3925 d.mem_write = 0;
3926 d.mem_address = *iv->location;
3927
3928 /* When the GIV is not always executed, we might be better off by
3929 not dirtying the cache pages. */
3930 if (PREFETCH_CONDITIONAL || iv->always_executed)
3931 note_stores (PATTERN (iv->insn), check_store, &d);
3932 else
3933 {
3934 if (loop_dump_stream)
3935 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3936 INSN_UID (iv->insn), "in conditional code.");
3937 continue;
3938 }
3939
3940 /* Attempt to find another prefetch to the same array and see if we
3941 can merge this one. */
3942 for (i = 0; i < num_prefetches; i++)
3943 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3944 && stride == info[i].stride)
3945 {
3946 /* If both access the same array (the same location,
3947 just with a small difference in the constant indexes), merge
3948 the prefetches. Just do the later one; the earlier one will
3949 get prefetched by the previous iteration.
3950 The artificial threshold should not be too small,
3951 but also not bigger than the small portion of memory usually
3952 traversed by a single loop. */
3953 if (index >= info[i].index
3954 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3955 {
3956 info[i].write |= d.mem_write;
3957 info[i].bytes_accessed += size;
3958 info[i].index = index;
3959 info[i].giv = iv;
3960 info[i].class = bl;
3961 info[i].base_address = address;
3962 add = 0;
3963 break;
3964 }
3965
3966 if (index < info[i].index
3967 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3968 {
3969 info[i].write |= d.mem_write;
3970 info[i].bytes_accessed += size;
3971 add = 0;
3972 break;
3973 }
3974 }
3975
3976 /* Merging failed. */
3977 if (add)
3978 {
3979 info[num_prefetches].giv = iv;
3980 info[num_prefetches].class = bl;
3981 info[num_prefetches].index = index;
3982 info[num_prefetches].stride = stride;
3983 info[num_prefetches].base_address = address;
3984 info[num_prefetches].write = d.mem_write;
3985 info[num_prefetches].bytes_accessed = size;
3986 num_prefetches++;
3987 if (num_prefetches >= MAX_PREFETCHES)
3988 {
3989 if (loop_dump_stream)
3990 fprintf (loop_dump_stream,
3991 "Maximal number of prefetches exceeded.\n");
3992 return;
3993 }
3994 }
3995 }
3996 }
3997
3998 for (i = 0; i < num_prefetches; i++)
3999 {
4000 int density;
4001
4002 /* Attempt to calculate the total number of bytes fetched by all
4003 iterations of the loop. Avoid overflow. */
4004 if (LOOP_INFO (loop)->n_iterations
4005 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4006 >= LOOP_INFO (loop)->n_iterations))
4007 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4008 else
4009 info[i].total_bytes = 0xffffffff;
4010
4011 density = info[i].bytes_accessed * 100 / info[i].stride;
4012
4013 /* Prefetch might be worthwhile only when the loads/stores are dense. */
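/* Illustrative numbers only: a single 4-byte access with a 32-byte stride
   gives a density of 4 * 100 / 32 == 12 (percent), and the test below then
   compares 12 * 256 against PREFETCH_DENSE_MEM * 100. */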
4014 if (PREFETCH_ONLY_DENSE_MEM)
4015 if (density * 256 > PREFETCH_DENSE_MEM * 100
4016 && (info[i].total_bytes / PREFETCH_BLOCK
4017 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4018 {
4019 info[i].prefetch_before_loop = 1;
4020 info[i].prefetch_in_loop
4021 = (info[i].total_bytes / PREFETCH_BLOCK
4022 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4023 }
4024 else
4025 {
4026 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4027 if (loop_dump_stream)
4028 fprintf (loop_dump_stream,
4029 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4030 INSN_UID (info[i].giv->insn), density);
4031 }
4032 else
4033 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4034
4035 /* Find how many prefetch instructions we'll use within the loop. */
4036 if (info[i].prefetch_in_loop != 0)
4037 {
4038 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4039 / PREFETCH_BLOCK);
4040 num_real_prefetches += info[i].prefetch_in_loop;
4041 if (info[i].write)
4042 num_real_write_prefetches += info[i].prefetch_in_loop;
4043 }
4044 }
4045
4046 /* Determine how many iterations ahead to prefetch within the loop, based
4047 on how many prefetches we currently expect to do within the loop. */
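/* For example (illustrative numbers): with 2 prefetch insns per iteration
   and a target that can sustain 6 simultaneous prefetches, AHEAD becomes
   6 / 2 == 3; if the division yields zero, prefetching within the loop is
   abandoned below. */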
4048 if (num_real_prefetches != 0)
4049 {
4050 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4051 {
4052 if (loop_dump_stream)
4053 fprintf (loop_dump_stream,
4054 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4055 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4056 num_real_prefetches = 0, num_real_write_prefetches = 0;
4057 }
4058 }
4059 /* We'll also use AHEAD to determine how many prefetch instructions to
4060 emit before a loop, so don't leave it zero. */
4061 if (ahead == 0)
4062 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4063
4064 for (i = 0; i < num_prefetches; i++)
4065 {
4066 /* Update if we've decided not to prefetch anything within the loop. */
4067 if (num_real_prefetches == 0)
4068 info[i].prefetch_in_loop = 0;
4069
4070 /* Find how many prefetch instructions we'll use before the loop. */
4071 if (info[i].prefetch_before_loop != 0)
4072 {
4073 int n = info[i].total_bytes / PREFETCH_BLOCK;
4074 if (n > ahead)
4075 n = ahead;
4076 info[i].prefetch_before_loop = n;
4077 num_prefetches_before += n;
4078 if (info[i].write)
4079 num_write_prefetches_before += n;
4080 }
4081
4082 if (loop_dump_stream)
4083 {
4084 if (info[i].prefetch_in_loop == 0
4085 && info[i].prefetch_before_loop == 0)
4086 continue;
4087 fprintf (loop_dump_stream, "Prefetch insn: %d",
4088 INSN_UID (info[i].giv->insn));
4089 fprintf (loop_dump_stream,
4090 "; in loop: %d; before: %d; %s\n",
4091 info[i].prefetch_in_loop,
4092 info[i].prefetch_before_loop,
4093 info[i].write ? "read/write" : "read only");
4094 fprintf (loop_dump_stream,
4095 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4096 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4097 info[i].bytes_accessed, info[i].total_bytes);
4098 fprintf (loop_dump_stream, " index: ");
4099 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4100 fprintf (loop_dump_stream, "; stride: ");
4101 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4102 fprintf (loop_dump_stream, "; address: ");
4103 print_rtl (loop_dump_stream, info[i].base_address);
4104 fprintf (loop_dump_stream, "\n");
4105 }
4106 }
4107
4108 if (num_real_prefetches + num_prefetches_before > 0)
4109 {
4110 /* Record that this loop uses prefetch instructions. */
4111 LOOP_INFO (loop)->has_prefetch = 1;
4112
4113 if (loop_dump_stream)
4114 {
4115 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4116 num_real_prefetches, num_real_write_prefetches);
4117 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4118 num_prefetches_before, num_write_prefetches_before);
4119 }
4120 }
4121
4122 for (i = 0; i < num_prefetches; i++)
4123 {
4124 int y;
4125
4126 for (y = 0; y < info[i].prefetch_in_loop; y++)
4127 {
4128 rtx loc = copy_rtx (*info[i].giv->location);
4129 rtx insn;
4130 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4131 rtx before_insn = info[i].giv->insn;
4132 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4133 rtx seq;
4134
4135 /* We can save some effort by offsetting the address on
4136 architectures with offsettable memory references. */
4137 if (offsettable_address_p (0, VOIDmode, loc))
4138 loc = plus_constant (loc, bytes_ahead);
4139 else
4140 {
4141 rtx reg = gen_reg_rtx (Pmode);
4142 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4143 GEN_INT (bytes_ahead), reg,
4144 0, before_insn);
4145 loc = reg;
4146 }
4147
4148 start_sequence ();
4149 /* Make sure the address operand is valid for prefetch. */
4150 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4151 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4152 loc = force_reg (Pmode, loc);
4153 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4154 GEN_INT (3)));
4155 seq = get_insns ();
4156 end_sequence ();
4157 emit_insn_before (seq, before_insn);
4158
4159 /* Check all insns emitted and record the new GIV
4160 information. */
4161 insn = NEXT_INSN (prev_insn);
4162 while (insn != before_insn)
4163 {
4164 insn = check_insn_for_givs (loop, insn,
4165 info[i].giv->always_executed,
4166 info[i].giv->maybe_multiple);
4167 insn = NEXT_INSN (insn);
4168 }
4169 }
4170
4171 if (PREFETCH_BEFORE_LOOP)
4172 {
4173 /* Emit insns before the loop to fetch the first cache lines or,
4174 if we're not prefetching within the loop, everything we expect
4175 to need. */
4176 for (y = 0; y < info[i].prefetch_before_loop; y++)
4177 {
4178 rtx reg = gen_reg_rtx (Pmode);
4179 rtx loop_start = loop->start;
4180 rtx init_val = info[i].class->initial_value;
4181 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4182 info[i].giv->add_val,
4183 GEN_INT (y * PREFETCH_BLOCK));
4184
4185 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4186 non-constant INIT_VAL to have the same mode as REG, which
4187 in this case we know to be Pmode. */
4188 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4189 init_val = convert_to_mode (Pmode, init_val, 0);
4190 loop_iv_add_mult_emit_before (loop, init_val,
4191 info[i].giv->mult_val,
4192 add_val, reg, 0, loop_start);
4193 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4194 GEN_INT (3)),
4195 loop_start);
4196 }
4197 }
4198 }
4199
4200 return;
4201 }
4202 \f
4203 /* A "basic induction variable" or biv is a pseudo reg that is set
4204 (within this loop) only by incrementing or decrementing it. */
4205 /* A "general induction variable" or giv is a pseudo reg whose
4206 value is a linear function of a biv. */
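/* For instance, in a loop like

     for (i = 0; i < n; i++)
       x[i] = 0;

   the counter i is a biv, while the address computation x + i * 4
   (assuming 4-byte elements) is a giv, being a linear function of i. */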
4207
4208 /* Bivs are recognized by `basic_induction_var';
4209 Givs by `general_induction_var'. */
4210
4211 /* Communication with routines called via `note_stores'. */
4212
4213 static rtx note_insn;
4214
4215 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4216
4217 static rtx addr_placeholder;
4218
4219 /* ??? Unfinished optimizations, and possible future optimizations,
4220 for the strength reduction code. */
4221
4222 /* ??? The interaction of biv elimination, and recognition of 'constant'
4223 bivs, may cause problems. */
4224
4225 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4226 performance problems.
4227
4228 Perhaps don't eliminate things that can be combined with an addressing
4229 mode. Find all givs that have the same biv, mult_val, and add_val;
4230 then for each giv, check to see if its only use dies in a following
4231 memory address. If so, generate a new memory address and check to see
4232 if it is valid. If it is valid, then store the modified memory address,
4233 otherwise, mark the giv as not done so that it will get its own iv. */
4234
4235 /* ??? Could try to optimize branches when it is known that a biv is always
4236 positive. */
4237
4238 /* ??? When replacing a biv in a compare insn, we should replace it with the
4239 closest giv so that an optimized branch can still be recognized by the combiner,
4240 e.g. the VAX acb insn. */
4241
4242 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4243 was rerun in loop_optimize whenever a register was added or moved.
4244 Also, some of the optimizations could be a little less conservative. */
4245 \f
4246 /* Scan the loop body and call FNCALL for each insn. In addition to the
4247 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4248 callback.
4249
4250 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4251 least once for every loop iteration except for the last one.
4252
4253 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4254 loop iteration.
4255 */
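/* As an illustration, in a loop body such as

     for (i = 0; i < n; i++)
       if (p[i])
         j++;

   the insn incrementing j is passed to FNCALL with NOT_EVERY_ITERATION
   set, since the conditional branch around it means it need not execute
   on every iteration. */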
4256 void
4257 for_each_insn_in_loop (loop, fncall)
4258 struct loop *loop;
4259 loop_insn_callback fncall;
4260 {
4261 int not_every_iteration = 0;
4262 int maybe_multiple = 0;
4263 int past_loop_latch = 0;
4264 int loop_depth = 0;
4265 rtx p;
4266
4267 /* If loop_scan_start points to the loop exit test, we have to be wary of
4268 subversive use of gotos inside expression statements. */
4269 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4270 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4271
4272 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4273 for (p = next_insn_in_loop (loop, loop->scan_start);
4274 p != NULL_RTX;
4275 p = next_insn_in_loop (loop, p))
4276 {
4277 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4278
4279 /* Past CODE_LABEL, we get to insns that may be executed multiple
4280 times. The only way we can be sure that they can't is if every
4281 jump insn between here and the end of the loop either
4282 returns, exits the loop, is a jump to a location that is still
4283 behind the label, or is a jump to the loop start. */
4284
4285 if (GET_CODE (p) == CODE_LABEL)
4286 {
4287 rtx insn = p;
4288
4289 maybe_multiple = 0;
4290
4291 while (1)
4292 {
4293 insn = NEXT_INSN (insn);
4294 if (insn == loop->scan_start)
4295 break;
4296 if (insn == loop->end)
4297 {
4298 if (loop->top != 0)
4299 insn = loop->top;
4300 else
4301 break;
4302 if (insn == loop->scan_start)
4303 break;
4304 }
4305
4306 if (GET_CODE (insn) == JUMP_INSN
4307 && GET_CODE (PATTERN (insn)) != RETURN
4308 && (!any_condjump_p (insn)
4309 || (JUMP_LABEL (insn) != 0
4310 && JUMP_LABEL (insn) != loop->scan_start
4311 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4312 {
4313 maybe_multiple = 1;
4314 break;
4315 }
4316 }
4317 }
4318
4319 /* Past a jump, we get to insns for which we can't count
4320 on whether they will be executed during each iteration. */
4321 /* This code appears twice in strength_reduce. There is also similar
4322 code in scan_loop. */
4323 if (GET_CODE (p) == JUMP_INSN
4324 /* If we enter the loop in the middle, and scan around to the
4325 beginning, don't set not_every_iteration for that.
4326 This can be any kind of jump, since we want to know if insns
4327 will be executed if the loop is executed. */
4328 && !(JUMP_LABEL (p) == loop->top
4329 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4330 && any_uncondjump_p (p))
4331 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4332 {
4333 rtx label = 0;
4334
4335 /* If this is a jump outside the loop, then it also doesn't
4336 matter. Check to see if the target of this branch is on the
4337 loop->exits_labels list. */
4338
4339 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4340 if (XEXP (label, 0) == JUMP_LABEL (p))
4341 break;
4342
4343 if (!label)
4344 not_every_iteration = 1;
4345 }
4346
4347 else if (GET_CODE (p) == NOTE)
4348 {
4349 /* At the virtual top of a converted loop, insns are again known to
4350 be executed each iteration: logically, the loop begins here
4351 even though the exit code has been duplicated.
4352
4353 Insns are also again known to be executed each iteration at
4354 the LOOP_CONT note. */
4355 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4356 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4357 && loop_depth == 0)
4358 not_every_iteration = 0;
4359 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4360 loop_depth++;
4361 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4362 loop_depth--;
4363 }
4364
4365 /* Note if we pass a loop latch. If we do, then we can not clear
4366 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4367 a loop since a jump before the last CODE_LABEL may have started
4368 a new loop iteration.
4369
4370 Note that LOOP_TOP is only set for rotated loops and we need
4371 this check for all loops, so compare against the CODE_LABEL
4372 which immediately follows LOOP_START. */
4373 if (GET_CODE (p) == JUMP_INSN
4374 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4375 past_loop_latch = 1;
4376
4377 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4378 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4379 or not an insn is known to be executed each iteration of the
4380 loop, whether or not any iterations are known to occur.
4381
4382 Therefore, if we have just passed a label and have no more labels
4383 between here and the test insn of the loop, and we have not passed
4384 a jump to the top of the loop, then we know these insns will be
4385 executed each iteration. */
4386
4387 if (not_every_iteration
4388 && !past_loop_latch
4389 && GET_CODE (p) == CODE_LABEL
4390 && no_labels_between_p (p, loop->end)
4391 && loop_insn_first_p (p, loop->cont))
4392 not_every_iteration = 0;
4393 }
4394 }
4395 \f
4396 static void
4397 loop_bivs_find (loop)
4398 struct loop *loop;
4399 {
4400 struct loop_regs *regs = LOOP_REGS (loop);
4401 struct loop_ivs *ivs = LOOP_IVS (loop);
4402 /* Temporary list pointers for traversing ivs->list. */
4403 struct iv_class *bl, **backbl;
4404
4405 ivs->list = 0;
4406
4407 for_each_insn_in_loop (loop, check_insn_for_bivs);
4408
4409 /* Scan ivs->list to remove all regs that proved not to be bivs.
4410 Make a sanity check against regs->n_times_set. */
4411 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4412 {
4413 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4414 /* Above happens if register modified by subreg, etc. */
4415 /* Make sure it is not recognized as a basic induction var: */
4416 || regs->array[bl->regno].n_times_set != bl->biv_count
4417 /* If it was never incremented, it is an invariant that we decided not to
4418 move, so leave it alone. */
4419 || ! bl->incremented)
4420 {
4421 if (loop_dump_stream)
4422 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4423 bl->regno,
4424 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4425 ? "not induction variable"
4426 : (! bl->incremented ? "never incremented"
4427 : "count error")));
4428
4429 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4430 *backbl = bl->next;
4431 }
4432 else
4433 {
4434 backbl = &bl->next;
4435
4436 if (loop_dump_stream)
4437 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4438 }
4439 }
4440 }
4441
4442
4443 /* Determine how BIVs are initialized by looking through the pre-header
4444 extended basic block. */
4445 static void
4446 loop_bivs_init_find (loop)
4447 struct loop *loop;
4448 {
4449 struct loop_ivs *ivs = LOOP_IVS (loop);
4450 /* Temporary list pointers for traversing ivs->list. */
4451 struct iv_class *bl;
4452 int call_seen;
4453 rtx p;
4454
4455 /* Find initial value for each biv by searching backwards from loop_start,
4456 halting at first label. Also record any test condition. */
4457
4458 call_seen = 0;
4459 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4460 {
4461 rtx test;
4462
4463 note_insn = p;
4464
4465 if (GET_CODE (p) == CALL_INSN)
4466 call_seen = 1;
4467
4468 if (INSN_P (p))
4469 note_stores (PATTERN (p), record_initial, ivs);
4470
4471 /* Record any test of a biv that branches around the loop if no store
4472 between it and the start of loop. We only care about tests with
4473 constants and registers and only certain of those. */
4474 if (GET_CODE (p) == JUMP_INSN
4475 && JUMP_LABEL (p) != 0
4476 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4477 && (test = get_condition_for_loop (loop, p)) != 0
4478 && GET_CODE (XEXP (test, 0)) == REG
4479 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4480 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4481 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4482 && bl->init_insn == 0)
4483 {
4484 /* If an NE test, we have an initial value! */
4485 if (GET_CODE (test) == NE)
4486 {
4487 bl->init_insn = p;
4488 bl->init_set = gen_rtx_SET (VOIDmode,
4489 XEXP (test, 0), XEXP (test, 1));
4490 }
4491 else
4492 bl->initial_test = test;
4493 }
4494 }
4495 }
4496
4497
4498 /* Look at each biv and see if we can say anything better about its
4499 initial value from any initializing insns set up above. (This is done
4500 in two passes to avoid missing SETs in a PARALLEL.) */
4501 static void
4502 loop_bivs_check (loop)
4503 struct loop *loop;
4504 {
4505 struct loop_ivs *ivs = LOOP_IVS (loop);
4506 /* Temporary list pointers for traversing ivs->list. */
4507 struct iv_class *bl;
4508 struct iv_class **backbl;
4509
4510 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4511 {
4512 rtx src;
4513 rtx note;
4514
4515 if (! bl->init_insn)
4516 continue;
4517
4518 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4519 is a constant, use the value of that. */
4520 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4521 && CONSTANT_P (XEXP (note, 0)))
4522 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4523 && CONSTANT_P (XEXP (note, 0))))
4524 src = XEXP (note, 0);
4525 else
4526 src = SET_SRC (bl->init_set);
4527
4528 if (loop_dump_stream)
4529 fprintf (loop_dump_stream,
4530 "Biv %d: initialized at insn %d: initial value ",
4531 bl->regno, INSN_UID (bl->init_insn));
4532
4533 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4534 || GET_MODE (src) == VOIDmode)
4535 && valid_initial_value_p (src, bl->init_insn,
4536 LOOP_INFO (loop)->pre_header_has_call,
4537 loop->start))
4538 {
4539 bl->initial_value = src;
4540
4541 if (loop_dump_stream)
4542 {
4543 print_simple_rtl (loop_dump_stream, src);
4544 fputc ('\n', loop_dump_stream);
4545 }
4546 }
4547 /* If we can't make it a giv,
4548 let biv keep initial value of "itself". */
4549 else if (loop_dump_stream)
4550 fprintf (loop_dump_stream, "is complex\n");
4551 }
4552 }
4553
4554
4555 /* Search the loop for general induction variables. */
4556
4557 static void
4558 loop_givs_find (loop)
4559 struct loop* loop;
4560 {
4561 for_each_insn_in_loop (loop, check_insn_for_givs);
4562 }
4563
4564
4565 /* For each giv for which we still don't know whether or not it is
4566 replaceable, check to see if it is replaceable because its final value
4567 can be calculated. */
4568
4569 static void
4570 loop_givs_check (loop)
4571 struct loop *loop;
4572 {
4573 struct loop_ivs *ivs = LOOP_IVS (loop);
4574 struct iv_class *bl;
4575
4576 for (bl = ivs->list; bl; bl = bl->next)
4577 {
4578 struct induction *v;
4579
4580 for (v = bl->giv; v; v = v->next_iv)
4581 if (! v->replaceable && ! v->not_replaceable)
4582 check_final_value (loop, v);
4583 }
4584 }
4585
4586
4587 /* Return non-zero if it is possible to eliminate the biv BL provided
4588 all givs are reduced. This is possible if either the reg is not
4589 used outside the loop, or we can compute what its final value will
4590 be. */
4591
4592 static int
4593 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4594 struct loop *loop;
4595 struct iv_class *bl;
4596 int threshold;
4597 int insn_count;
4598 {
4599 /* For architectures with a decrement_and_branch_until_zero insn,
4600 don't do this if we put a REG_NONNEG note on the endtest for this
4601 biv. */
4602
4603 #ifdef HAVE_decrement_and_branch_until_zero
4604 if (bl->nonneg)
4605 {
4606 if (loop_dump_stream)
4607 fprintf (loop_dump_stream,
4608 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4609 return 0;
4610 }
4611 #endif
4612
4613 /* Check that the biv is not used outside the loop, or that it has a final value.
4614 Compare against bl->init_insn rather than loop->start. We aren't
4615 concerned with any uses of the biv between init_insn and
4616 loop->start since these won't be affected by the value of the biv
4617 elsewhere in the function, so long as init_insn doesn't use the
4618 biv itself. */
4619
4620 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4621 && bl->init_insn
4622 && INSN_UID (bl->init_insn) < max_uid_for_loop
4623 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4624 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4625 || (bl->final_value = final_biv_value (loop, bl)))
4626 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4627
4628 if (loop_dump_stream)
4629 {
4630 fprintf (loop_dump_stream,
4631 "Cannot eliminate biv %d.\n",
4632 bl->regno);
4633 fprintf (loop_dump_stream,
4634 "First use: insn %d, last use: insn %d.\n",
4635 REGNO_FIRST_UID (bl->regno),
4636 REGNO_LAST_UID (bl->regno));
4637 }
4638 return 0;
4639 }
4640
4641
4642 /* Reduce each giv of BL that we have decided to reduce. */
4643
4644 static void
4645 loop_givs_reduce (loop, bl)
4646 struct loop *loop;
4647 struct iv_class *bl;
4648 {
4649 struct induction *v;
4650
4651 for (v = bl->giv; v; v = v->next_iv)
4652 {
4653 struct induction *tv;
4654 if (! v->ignore && v->same == 0)
4655 {
4656 int auto_inc_opt = 0;
4657
4658 /* If the code for derived givs immediately below has already
4659 allocated a new_reg, we must keep it. */
4660 if (! v->new_reg)
4661 v->new_reg = gen_reg_rtx (v->mode);
4662
4663 #ifdef AUTO_INC_DEC
4664 /* If the target has auto-increment addressing modes, and
4665 this is an address giv, then try to put the increment
4666 immediately after its use, so that flow can create an
4667 auto-increment addressing mode. */
4668 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4669 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4670 /* We don't handle reversed biv's because bl->biv->insn
4671 does not have a valid INSN_LUID. */
4672 && ! bl->reversed
4673 && v->always_executed && ! v->maybe_multiple
4674 && INSN_UID (v->insn) < max_uid_for_loop)
4675 {
4676 /* If other giv's have been combined with this one, then
4677 this will work only if all uses of the other giv's occur
4678 before this giv's insn. This is difficult to check.
4679
4680 We simplify this by looking for the common case where
4681 there is one DEST_REG giv, and this giv's insn is the
4682 last use of the dest_reg of that DEST_REG giv. If the
4683 increment occurs after the address giv, then we can
4684 perform the optimization. (Otherwise, the increment
4685 would have to go before other_giv, and we would not be
4686 able to combine it with the address giv to get an
4687 auto-inc address.) */
4688 if (v->combined_with)
4689 {
4690 struct induction *other_giv = 0;
4691
4692 for (tv = bl->giv; tv; tv = tv->next_iv)
4693 if (tv->same == v)
4694 {
4695 if (other_giv)
4696 break;
4697 else
4698 other_giv = tv;
4699 }
4700 if (! tv && other_giv
4701 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4702 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4703 == INSN_UID (v->insn))
4704 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4705 auto_inc_opt = 1;
4706 }
4707 /* Check for case where increment is before the address
4708 giv. Do this test in "loop order". */
4709 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4710 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4711 || (INSN_LUID (bl->biv->insn)
4712 > INSN_LUID (loop->scan_start))))
4713 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4714 && (INSN_LUID (loop->scan_start)
4715 < INSN_LUID (bl->biv->insn))))
4716 auto_inc_opt = -1;
4717 else
4718 auto_inc_opt = 1;
4719
4720 #ifdef HAVE_cc0
4721 {
4722 rtx prev;
4723
4724 /* We can't put an insn immediately after one setting
4725 cc0, or immediately before one using cc0. */
4726 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4727 || (auto_inc_opt == -1
4728 && (prev = prev_nonnote_insn (v->insn)) != 0
4729 && INSN_P (prev)
4730 && sets_cc0_p (PATTERN (prev))))
4731 auto_inc_opt = 0;
4732 }
4733 #endif
4734
4735 if (auto_inc_opt)
4736 v->auto_inc_opt = 1;
4737 }
4738 #endif
4739
4740 /* For each place where the biv is incremented, add an insn
4741 to increment the new, reduced reg for the giv. */
4742 for (tv = bl->biv; tv; tv = tv->next_iv)
4743 {
4744 rtx insert_before;
4745
4746 if (! auto_inc_opt)
4747 insert_before = tv->insn;
4748 else if (auto_inc_opt == 1)
4749 insert_before = NEXT_INSN (v->insn);
4750 else
4751 insert_before = v->insn;
4752
4753 if (tv->mult_val == const1_rtx)
4754 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4755 v->new_reg, v->new_reg,
4756 0, insert_before);
4757 else /* tv->mult_val == const0_rtx */
4758 /* A multiply is acceptable here
4759 since this is presumed to be seldom executed. */
4760 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4761 v->add_val, v->new_reg,
4762 0, insert_before);
4763 }
4764
4765 /* Add code at loop start to initialize giv's reduced reg. */
4766
4767 loop_iv_add_mult_hoist (loop,
4768 extend_value_for_giv (v, bl->initial_value),
4769 v->mult_val, v->add_val, v->new_reg);
4770 }
4771 }
4772 }
4773
4774
4775 /* Check for givs whose first use is their definition and whose
4776 last use is the definition of another giv. If so, it is likely
4777 dead and should not be used to derive another giv nor to
4778 eliminate a biv. */
4779
4780 static void
4781 loop_givs_dead_check (loop, bl)
4782 struct loop *loop ATTRIBUTE_UNUSED;
4783 struct iv_class *bl;
4784 {
4785 struct induction *v;
4786
4787 for (v = bl->giv; v; v = v->next_iv)
4788 {
4789 if (v->ignore
4790 || (v->same && v->same->ignore))
4791 continue;
4792
4793 if (v->giv_type == DEST_REG
4794 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4795 {
4796 struct induction *v1;
4797
4798 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4799 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4800 v->maybe_dead = 1;
4801 }
4802 }
4803 }
4804
4805
4806 static void
4807 loop_givs_rescan (loop, bl, reg_map)
4808 struct loop *loop;
4809 struct iv_class *bl;
4810 rtx *reg_map;
4811 {
4812 struct induction *v;
4813
4814 for (v = bl->giv; v; v = v->next_iv)
4815 {
4816 if (v->same && v->same->ignore)
4817 v->ignore = 1;
4818
4819 if (v->ignore)
4820 continue;
4821
4822 /* Update expression if this was combined, in case other giv was
4823 replaced. */
4824 if (v->same)
4825 v->new_reg = replace_rtx (v->new_reg,
4826 v->same->dest_reg, v->same->new_reg);
4827
4828 /* See if this register is known to be a pointer to something. If
4829 so, see if we can find the alignment. First see if there is a
4830 destination register that is a pointer. If so, this shares the
4831 alignment too. Next see if we can deduce anything from the
4832 computational information. If not, and this is a DEST_ADDR
4833 giv, at least we know that it's a pointer, though we don't know
4834 the alignment. */
4835 if (GET_CODE (v->new_reg) == REG
4836 && v->giv_type == DEST_REG
4837 && REG_POINTER (v->dest_reg))
4838 mark_reg_pointer (v->new_reg,
4839 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4840 else if (GET_CODE (v->new_reg) == REG
4841 && REG_POINTER (v->src_reg))
4842 {
4843 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4844
4845 if (align == 0
4846 || GET_CODE (v->add_val) != CONST_INT
4847 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4848 align = 0;
4849
4850 mark_reg_pointer (v->new_reg, align);
4851 }
4852 else if (GET_CODE (v->new_reg) == REG
4853 && GET_CODE (v->add_val) == REG
4854 && REG_POINTER (v->add_val))
4855 {
4856 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4857
4858 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4859 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4860 align = 0;
4861
4862 mark_reg_pointer (v->new_reg, align);
4863 }
4864 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4865 mark_reg_pointer (v->new_reg, 0);
4866
4867 if (v->giv_type == DEST_ADDR)
4868 /* Store reduced reg as the address in the memref where we found
4869 this giv. */
4870 validate_change (v->insn, v->location, v->new_reg, 0);
4871 else if (v->replaceable)
4872 {
4873 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4874 }
4875 else
4876 {
4877 rtx original_insn = v->insn;
4878 rtx note;
4879
4880 /* Not replaceable; emit an insn to set the original giv reg from
4881 the reduced giv, same as above. */
4882 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4883 gen_move_insn (v->dest_reg,
4884 v->new_reg));
4885
4886 /* The original insn may have a REG_EQUAL note. This note is
4887 now incorrect and may result in invalid substitutions later.
4888 The original insn is dead, but may be part of a libcall
4889 sequence, which doesn't seem worth the bother of handling. */
4890 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4891 if (note)
4892 remove_note (original_insn, note);
4893 }
4894
4895 /* When a loop is reversed, givs which depend on the reversed
4896 biv, and which are live outside the loop, must be set to their
4897 correct final value. This insn is only needed if the giv is
4898 not replaceable. The correct final value is the same as the
4899 value that the giv starts the reversed loop with. */
4900 if (bl->reversed && ! v->replaceable)
4901 loop_iv_add_mult_sink (loop,
4902 extend_value_for_giv (v, bl->initial_value),
4903 v->mult_val, v->add_val, v->dest_reg);
4904 else if (v->final_value)
4905 loop_insn_sink_or_swim (loop,
4906 gen_load_of_final_value (v->dest_reg,
4907 v->final_value));
4908
4909 if (loop_dump_stream)
4910 {
4911 fprintf (loop_dump_stream, "giv at %d reduced to ",
4912 INSN_UID (v->insn));
4913 print_simple_rtl (loop_dump_stream, v->new_reg);
4914 fprintf (loop_dump_stream, "\n");
4915 }
4916 }
4917 }
4918
4919
4920 static int
4921 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4922 struct loop *loop ATTRIBUTE_UNUSED;
4923 struct iv_class *bl;
4924 struct induction *v;
4925 rtx test_reg;
4926 {
4927 int add_cost;
4928 int benefit;
4929
4930 benefit = v->benefit;
4931 PUT_MODE (test_reg, v->mode);
4932 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4933 test_reg, test_reg);
4934
4935 /* Reduce benefit if not replaceable, since we will insert a
4936 move-insn to replace the insn that calculates this giv. Don't do
4937 this unless the giv is a user variable, since it will often be
4938 marked non-replaceable because of the duplication of the exit
4939 code outside the loop. In such a case, the copies we insert are
4940 dead and will be deleted. So they don't have a cost. Similar
4941 situations exist. */
4942 /* ??? The new final_[bg]iv_value code does a much better job of
4943 finding replaceable giv's, and hence this code may no longer be
4944 necessary. */
4945 if (! v->replaceable && ! bl->eliminable
4946 && REG_USERVAR_P (v->dest_reg))
4947 benefit -= copy_cost;
4948
4949 /* Decrease the benefit to count the add-insns that we will insert
4950 to increment the reduced reg for the giv. ??? This can
4951 overestimate the run-time cost of the additional insns, e.g. if
4952 there are multiple basic blocks that increment the biv, but only
4953 one of these blocks is executed during each iteration. There is
4954 no good way to detect cases like this with the current structure
4955 of the loop optimizer. This code is more accurate for
4956 determining code size than run-time benefits. */
4957 benefit -= add_cost * bl->biv_count;
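/* For instance (illustrative numbers): a giv with an initial benefit of 10
   whose biv is incremented at 3 places, each add costing 2, is left with
   10 - 3 * 2 == 4 before any auto-increment adjustment below. */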
4958
4959 /* Decide whether to strength-reduce this giv or to leave the code
4960 unchanged (recompute it from the biv each time it is used). This
4961 decision can be made independently for each giv. */
4962
4963 #ifdef AUTO_INC_DEC
4964 /* Attempt to guess whether autoincrement will handle some of the
4965 new add insns; if so, increase BENEFIT (undo the subtraction of
4966 add_cost that was done above). */
4967 if (v->giv_type == DEST_ADDR
4968 /* Increasing the benefit is risky, since this is only a guess.
4969 Avoid increasing register pressure in cases where there would
4970 be no other benefit from reducing this giv. */
4971 && benefit > 0
4972 && GET_CODE (v->mult_val) == CONST_INT)
4973 {
4974 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4975
4976 if (HAVE_POST_INCREMENT
4977 && INTVAL (v->mult_val) == size)
4978 benefit += add_cost * bl->biv_count;
4979 else if (HAVE_PRE_INCREMENT
4980 && INTVAL (v->mult_val) == size)
4981 benefit += add_cost * bl->biv_count;
4982 else if (HAVE_POST_DECREMENT
4983 && -INTVAL (v->mult_val) == size)
4984 benefit += add_cost * bl->biv_count;
4985 else if (HAVE_PRE_DECREMENT
4986 && -INTVAL (v->mult_val) == size)
4987 benefit += add_cost * bl->biv_count;
4988 }
4989 #endif
4990
4991 return benefit;
4992 }
4993
4994
4995 /* Free IV structures for LOOP. */
4996
4997 static void
4998 loop_ivs_free (loop)
4999 struct loop *loop;
5000 {
5001 struct loop_ivs *ivs = LOOP_IVS (loop);
5002 struct iv_class *iv = ivs->list;
5003
5004 free (ivs->regs);
5005
5006 while (iv)
5007 {
5008 struct iv_class *next = iv->next;
5009 struct induction *induction;
5010 struct induction *next_induction;
5011
5012 for (induction = iv->biv; induction; induction = next_induction)
5013 {
5014 next_induction = induction->next_iv;
5015 free (induction);
5016 }
5017 for (induction = iv->giv; induction; induction = next_induction)
5018 {
5019 next_induction = induction->next_iv;
5020 free (induction);
5021 }
5022
5023 free (iv);
5024 iv = next;
5025 }
5026 }
5027
5028
5029 /* Perform strength reduction and induction variable elimination.
5030
5031 Pseudo registers created during this function will be beyond the
5032 last valid index in several tables including
5033 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5034 problem here, because the added registers cannot be givs outside of
5035 their loop, and hence will never be reconsidered. But scan_loop
5036 must check regnos to make sure they are in bounds. */
5037
5038 static void
5039 strength_reduce (loop, flags)
5040 struct loop *loop;
5041 int flags;
5042 {
5043 struct loop_info *loop_info = LOOP_INFO (loop);
5044 struct loop_regs *regs = LOOP_REGS (loop);
5045 struct loop_ivs *ivs = LOOP_IVS (loop);
5046 rtx p;
5047 /* Temporary list pointer for traversing ivs->list. */
5048 struct iv_class *bl;
5049 /* Ratio of extra register life span we can justify
5050 for saving an instruction. More if loop doesn't call subroutines
5051 since in that case saving an insn makes more difference
5052 and more registers are available. */
5053 /* ??? could set this to last value of threshold in move_movables */
5054 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
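/* Illustrative only: on a machine with 29 non-fixed registers this is
   2 * (3 + 29) == 64 for a loop without calls, and half that when the
   loop contains a call. */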
5055 /* Map of pseudo-register replacements. */
5056 rtx *reg_map = NULL;
5057 int reg_map_size;
5058 int unrolled_insn_copies = 0;
5059 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5060 int insn_count = count_insns_in_loop (loop);
5061
5062 addr_placeholder = gen_reg_rtx (Pmode);
5063
5064 ivs->n_regs = max_reg_before_loop;
5065 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5066
5067 /* Find all BIVs in loop. */
5068 loop_bivs_find (loop);
5069
5070 /* Exit if there are no bivs. */
5071 if (! ivs->list)
5072 {
5073 /* We can still unroll the loop anyway, but indicate that there is no
5074 strength reduction info available. */
5075 if (flags & LOOP_UNROLL)
5076 unroll_loop (loop, insn_count, 0);
5077
5078 loop_ivs_free (loop);
5079 return;
5080 }
5081
5082 /* Determine how BIVs are initialized by looking through the pre-header
5083 extended basic block. */
5084 loop_bivs_init_find (loop);
5085
5086 /* Look at each biv and see if we can say anything better about its
5087 initial value from any initializing insns set up above. */
5088 loop_bivs_check (loop);
5089
5090 /* Search the loop for general induction variables. */
5091 loop_givs_find (loop);
5092
5093 /* Try to calculate and save the number of loop iterations. This is
5094 set to zero if the actual number can not be calculated. This must
5095 be called after all giv's have been identified, since otherwise it may
5096 fail if the iteration variable is a giv. */
5097 loop_iterations (loop);
5098
5099 #ifdef HAVE_prefetch
5100 if (flags & LOOP_PREFETCH)
5101 emit_prefetch_instructions (loop);
5102 #endif
5103
5104 /* Now for each giv for which we still don't know whether or not it is
5105 replaceable, check to see if it is replaceable because its final value
5106 can be calculated. This must be done after loop_iterations is called,
5107 so that final_giv_value will work correctly. */
5108 loop_givs_check (loop);
5109
5110 /* Try to prove that the loop counter variable (if any) is always
5111 nonnegative; if so, record that fact with a REG_NONNEG note
5112 so that "decrement and branch until zero" insn can be used. */
5113 check_dbra_loop (loop, insn_count);
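/* E.g. a counting loop "for (i = 0; i < 100; i++) ..." whose counter is
   not otherwise needed may be reversed to count down toward zero so that
   a single decrement-and-branch insn can close the loop. */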
5114
5115 /* Create reg_map to hold substitutions for replaceable giv regs.
5116 Some givs might have been made from biv increments, so look at
5117 ivs->reg_iv_type for a suitable size. */
5118 reg_map_size = ivs->n_regs;
5119 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5120
5121 /* Examine each iv class for feasibility of strength reduction/induction
5122 variable elimination. */
5123
5124 for (bl = ivs->list; bl; bl = bl->next)
5125 {
5126 struct induction *v;
5127 int benefit;
5128
5129 /* Test whether it will be possible to eliminate this biv
5130 provided all givs are reduced. */
5131 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5132
5133 /* This will be true at the end, if all givs which depend on this
5134 biv have been strength reduced.
5135 We can't (currently) eliminate the biv unless this is so. */
5136 bl->all_reduced = 1;
5137
5138 /* Check each extension dependent giv in this class to see if its
5139 root biv is safe from wrapping in the interior mode. */
5140 check_ext_dependent_givs (bl, loop_info);
5141
5142 /* Combine all giv's for this iv_class. */
5143 combine_givs (regs, bl);
5144
5145 for (v = bl->giv; v; v = v->next_iv)
5146 {
5147 struct induction *tv;
5148
5149 if (v->ignore || v->same)
5150 continue;
5151
5152 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5153
5154 /* If an insn is not to be strength reduced, then set its ignore
5155 flag, and clear bl->all_reduced. */
5156
5157 /* A giv that depends on a reversed biv must be reduced if it is
5158 used after the loop exit, otherwise, it would have the wrong
5159 value after the loop exit. To make it simple, just reduce all
5160 of such giv's whether or not we know they are used after the loop
5161 exit. */
5162
5163 if (! flag_reduce_all_givs
5164 && v->lifetime * threshold * benefit < insn_count
5165 && ! bl->reversed)
5166 {
5167 if (loop_dump_stream)
5168 fprintf (loop_dump_stream,
5169 "giv of insn %d not worth while, %d vs %d.\n",
5170 INSN_UID (v->insn),
5171 v->lifetime * threshold * benefit, insn_count);
5172 v->ignore = 1;
5173 bl->all_reduced = 0;
5174 }
5175 else
5176 {
5177 /* Check that we can increment the reduced giv without a
5178 multiply insn. If not, reject it. */
5179
5180 for (tv = bl->biv; tv; tv = tv->next_iv)
5181 if (tv->mult_val == const1_rtx
5182 && ! product_cheap_p (tv->add_val, v->mult_val))
5183 {
5184 if (loop_dump_stream)
5185 fprintf (loop_dump_stream,
5186 "giv of insn %d: would need a multiply.\n",
5187 INSN_UID (v->insn));
5188 v->ignore = 1;
5189 bl->all_reduced = 0;
5190 break;
5191 }
5192 }
5193 }
5194
5195 /* Check for givs whose first use is their definition and whose
5196 last use is the definition of another giv. If so, it is likely
5197 dead and should not be used to derive another giv nor to
5198 eliminate a biv. */
5199 loop_givs_dead_check (loop, bl);
5200
5201 /* Reduce each giv that we decided to reduce. */
5202 loop_givs_reduce (loop, bl);
5203
5204 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5205 as not reduced.
5206
5207 For each giv register that can be reduced now: if replaceable,
5208 substitute reduced reg wherever the old giv occurs;
5209 else add new move insn "giv_reg = reduced_reg". */
5210 loop_givs_rescan (loop, bl, reg_map);
5211
5212 /* All the givs based on the biv bl have been reduced if they
5213 merit it. */
5214
5215 /* For each giv not marked as maybe dead that has been combined with a
5216 second giv, clear any "maybe dead" mark on that second giv.
5217 v->new_reg will either be or refer to the register of the giv it
5218 combined with.
5219
5220 Doing this clearing avoids problems in biv elimination where
5221 a giv's new_reg is a complex value that can't be put in the
5222 insn but the giv combined with (with a reg as new_reg) is
5223 marked maybe_dead. Since the register will be used in either
5224 case, we'd prefer it be used from the simpler giv. */
5225
5226 for (v = bl->giv; v; v = v->next_iv)
5227 if (! v->maybe_dead && v->same)
5228 v->same->maybe_dead = 0;
5229
5230 /* Try to eliminate the biv, if it is a candidate.
5231 This won't work if ! bl->all_reduced,
5232 since the givs we planned to use might not have been reduced.
5233
5234 We have to be careful that we didn't initially think we could
5235 eliminate this biv because of a giv that we now think may be
5236 dead and shouldn't be used as a biv replacement.
5237
5238 Also, there is the possibility that we may have a giv that looks
5239 like it can be used to eliminate a biv, but the resulting insn
5240 isn't valid. This can happen, for example, on the 88k, where a
5241 JUMP_INSN can compare a register only with zero. Attempts to
5242 replace it with a compare with a constant will fail.
5243
5244 Note that in cases where this call fails, we may have replaced some
5245 of the occurrences of the biv with a giv, but no harm was done in
5246 doing so in the rare cases where it can occur. */
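      /* As a typical illustration (not taken from any particular testcase):
	 a counter `i' used only in the exit test `i < n' can be eliminated
	 once an address giv such as `p = base + i * 4' has been reduced, by
	 rewriting that test as a comparison of `p' against `base + n * 4'.  */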
5247
5248 if (bl->all_reduced == 1 && bl->eliminable
5249 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5250 {
5251 /* ?? If we created a new test to bypass the loop entirely,
5252 or otherwise drop straight in, based on this test, then
5253 we might want to rewrite it also. This way some later
5254 pass has more hope of removing the initialization of this
5255 biv entirely. */
5256
5257 /* If final_value != 0, then the biv may be used after loop end
5258 and we must emit an insn to set it just in case.
5259
5260 Reversed bivs already have an insn after the loop setting their
5261 value, so we don't need another one. We can't calculate the
5262 proper final value for such a biv here anyways. */
5263 if (bl->final_value && ! bl->reversed)
5264 loop_insn_sink_or_swim (loop,
5265 gen_load_of_final_value (bl->biv->dest_reg,
5266 bl->final_value));
5267
5268 if (loop_dump_stream)
5269 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5270 bl->regno);
5271 }
5272 /* See above note wrt final_value. But since we couldn't eliminate
5273 the biv, we must set the value after the loop instead of before. */
5274 else if (bl->final_value && ! bl->reversed)
5275 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5276 bl->final_value));
5277 }
5278
5279 /* Go through all the instructions in the loop, making all the
5280 register substitutions scheduled in REG_MAP. */
5281
5282 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5283 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5284 || GET_CODE (p) == CALL_INSN)
5285 {
5286 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5287 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5288 INSN_CODE (p) = -1;
5289 }
5290
5291 if (loop_info->n_iterations > 0)
5292 {
5293 /* When we completely unroll a loop we will likely not need the increment
5294 of the loop BIV and we will not need the conditional branch at the
5295 end of the loop. */
5296 unrolled_insn_copies = insn_count - 2;
5297
5298 #ifdef HAVE_cc0
5299 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5300 need the comparison before the conditional branch at the end of the
5301 loop. */
5302 unrolled_insn_copies -= 1;
5303 #endif
5304
5305 /* We'll need one copy for each loop iteration. */
5306 unrolled_insn_copies *= loop_info->n_iterations;
5307
5308 /* A little slop to account for the ability to remove initialization
5309 code, better CSE, and other secondary benefits of completely
5310 unrolling some loops. */
5311 unrolled_insn_copies -= 1;
5312
5313 /* Clamp the value. */
5314 if (unrolled_insn_copies < 0)
5315 unrolled_insn_copies = 0;
5316 }
5317
5318 /* Unroll loops from within strength reduction so that we can use the
5319 induction variable information that strength_reduce has already
5320 collected. Always unroll loops that would be as small or smaller
5321 unrolled than when rolled. */
5322 if ((flags & LOOP_UNROLL)
5323 || (!(flags & LOOP_FIRST_PASS)
5324 && loop_info->n_iterations > 0
5325 && unrolled_insn_copies <= insn_count))
5326 unroll_loop (loop, insn_count, 1);
5327
5328 #ifdef HAVE_doloop_end
5329 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5330 doloop_optimize (loop);
5331 #endif /* HAVE_doloop_end */
5332
5333 /* In case number of iterations is known, drop branch prediction note
5334 in the branch. Do that only in second loop pass, as loop unrolling
5335 may change the number of iterations performed. */
5336 if (flags & LOOP_BCT)
5337 {
5338 unsigned HOST_WIDE_INT n
5339 = loop_info->n_iterations / loop_info->unroll_number;
5340 if (n > 1)
5341 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5342 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5343 }
5344
5345 if (loop_dump_stream)
5346 fprintf (loop_dump_stream, "\n");
5347
5348 loop_ivs_free (loop);
5349 if (reg_map)
5350 free (reg_map);
5351 }
5352 \f
5353 /* Record all basic induction variables calculated in the insn. */
5354 static rtx
5355 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5356 struct loop *loop;
5357 rtx p;
5358 int not_every_iteration;
5359 int maybe_multiple;
5360 {
5361 struct loop_ivs *ivs = LOOP_IVS (loop);
5362 rtx set;
5363 rtx dest_reg;
5364 rtx inc_val;
5365 rtx mult_val;
5366 rtx *location;
5367
5368 if (GET_CODE (p) == INSN
5369 && (set = single_set (p))
5370 && GET_CODE (SET_DEST (set)) == REG)
5371 {
5372 dest_reg = SET_DEST (set);
5373 if (REGNO (dest_reg) < max_reg_before_loop
5374 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5375 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5376 {
5377 if (basic_induction_var (loop, SET_SRC (set),
5378 GET_MODE (SET_SRC (set)),
5379 dest_reg, p, &inc_val, &mult_val,
5380 &location))
5381 {
5382 /* It is a possible basic induction variable.
5383 Create and initialize an induction structure for it. */
5384
5385 struct induction *v
5386 = (struct induction *) xmalloc (sizeof (struct induction));
5387
5388 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5389 not_every_iteration, maybe_multiple);
5390 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5391 }
5392 else if (REGNO (dest_reg) < ivs->n_regs)
5393 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5394 }
5395 }
5396 return p;
5397 }
5398 \f
5399 /* Record all givs calculated in the insn.
5400 A register is a giv if: it is only set once, it is a function of a
5401 biv and a constant (or invariant), and it is not a biv. */
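/* For example, with a biv `i' stepped each iteration, an insn computing
   `j = i * 4 + 8' makes `j' a giv with mult_val 4 and add_val 8.  (The
   values here are purely illustrative.)  */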
5402 static rtx
5403 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5404 struct loop *loop;
5405 rtx p;
5406 int not_every_iteration;
5407 int maybe_multiple;
5408 {
5409 struct loop_regs *regs = LOOP_REGS (loop);
5410
5411 rtx set;
5412 /* Look for a general induction variable in a register. */
5413 if (GET_CODE (p) == INSN
5414 && (set = single_set (p))
5415 && GET_CODE (SET_DEST (set)) == REG
5416 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5417 {
5418 rtx src_reg;
5419 rtx dest_reg;
5420 rtx add_val;
5421 rtx mult_val;
5422 rtx ext_val;
5423 int benefit;
5424 rtx regnote = 0;
5425 rtx last_consec_insn;
5426
5427 dest_reg = SET_DEST (set);
5428 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5429 return p;
5430
5431 if (/* SET_SRC is a giv. */
5432 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5433 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5434 /* Equivalent expression is a giv. */
5435 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5436 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5437 &add_val, &mult_val, &ext_val, 0,
5438 &benefit, VOIDmode)))
5439 /* Don't try to handle any regs made by loop optimization.
5440 We have nothing on them in regno_first_uid, etc. */
5441 && REGNO (dest_reg) < max_reg_before_loop
5442 /* Don't recognize a BASIC_INDUCT_VAR here. */
5443 && dest_reg != src_reg
5444 /* This must be the only place where the register is set. */
5445 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5446 /* or all sets must be consecutive and make a giv. */
5447 || (benefit = consec_sets_giv (loop, benefit, p,
5448 src_reg, dest_reg,
5449 &add_val, &mult_val, &ext_val,
5450 &last_consec_insn))))
5451 {
5452 struct induction *v
5453 = (struct induction *) xmalloc (sizeof (struct induction));
5454
5455 /* If this is a library call, increase benefit. */
5456 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5457 benefit += libcall_benefit (p);
5458
5459 /* Skip the consecutive insns, if there are any. */
5460 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5461 p = last_consec_insn;
5462
5463 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5464 ext_val, benefit, DEST_REG, not_every_iteration,
5465 maybe_multiple, (rtx*) 0);
5466
5467 }
5468 }
5469
5470 #ifndef DONT_REDUCE_ADDR
5471 /* Look for givs which are memory addresses. */
5472 /* This resulted in worse code on a VAX 8600. I wonder if it
5473 still does. */
5474 if (GET_CODE (p) == INSN)
5475 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5476 maybe_multiple);
5477 #endif
5478
5479 /* Update the status of whether giv can derive other givs. This can
5480 change when we pass a label or an insn that updates a biv. */
5481 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5482 || GET_CODE (p) == CODE_LABEL)
5483 update_giv_derive (loop, p);
5484 return p;
5485 }
5486 \f
5487 /* Return 1 if X is a valid source for an initial value (or as a value being
5488 compared against in an initial test).
5489
5490 X must be either a register or constant and must not be clobbered between
5491 the current insn and the start of the loop.
5492
5493 INSN is the insn containing X. */
5494
5495 static int
5496 valid_initial_value_p (x, insn, call_seen, loop_start)
5497 rtx x;
5498 rtx insn;
5499 int call_seen;
5500 rtx loop_start;
5501 {
5502 if (CONSTANT_P (x))
5503 return 1;
5504
5505 /* Only consider pseudos we know about initialized in insns whose luids
5506 we know. */
5507 if (GET_CODE (x) != REG
5508 || REGNO (x) >= max_reg_before_loop)
5509 return 0;
5510
5511 /* Don't use a call-clobbered register across a call which clobbers it. On
5512 some machines, don't use any hard registers at all. */
5513 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5514 && (SMALL_REGISTER_CLASSES
5515 || (call_used_regs[REGNO (x)] && call_seen)))
5516 return 0;
5517
5518 /* Don't use registers that have been clobbered before the start of the
5519 loop. */
5520 if (reg_set_between_p (x, insn, loop_start))
5521 return 0;
5522
5523 return 1;
5524 }
5525 \f
5526 /* Scan X for memory refs and check each memory address
5527 as a possible giv. INSN is the insn whose pattern X comes from.
5528 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5529 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5530 more than once in each loop iteration. */
5531
5532 static void
5533 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5534 const struct loop *loop;
5535 rtx x;
5536 rtx insn;
5537 int not_every_iteration, maybe_multiple;
5538 {
5539 int i, j;
5540 enum rtx_code code;
5541 const char *fmt;
5542
5543 if (x == 0)
5544 return;
5545
5546 code = GET_CODE (x);
5547 switch (code)
5548 {
5549 case REG:
5550 case CONST_INT:
5551 case CONST:
5552 case CONST_DOUBLE:
5553 case SYMBOL_REF:
5554 case LABEL_REF:
5555 case PC:
5556 case CC0:
5557 case ADDR_VEC:
5558 case ADDR_DIFF_VEC:
5559 case USE:
5560 case CLOBBER:
5561 return;
5562
5563 case MEM:
5564 {
5565 rtx src_reg;
5566 rtx add_val;
5567 rtx mult_val;
5568 rtx ext_val;
5569 int benefit;
5570
5571 /* This code used to disable creating GIVs with mult_val == 1 and
5572 add_val == 0. However, this leads to lost optimizations when
5573 it comes time to combine a set of related DEST_ADDR GIVs, since
5574 this one would not be seen. */
5575
5576 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5577 &mult_val, &ext_val, 1, &benefit,
5578 GET_MODE (x)))
5579 {
5580 /* Found one; record it. */
5581 struct induction *v
5582 = (struct induction *) xmalloc (sizeof (struct induction));
5583
5584 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5585 add_val, ext_val, benefit, DEST_ADDR,
5586 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5587
5588 v->mem = x;
5589 }
5590 }
5591 return;
5592
5593 default:
5594 break;
5595 }
5596
5597 /* Recursively scan the subexpressions for other mem refs. */
5598
5599 fmt = GET_RTX_FORMAT (code);
5600 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5601 if (fmt[i] == 'e')
5602 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5603 maybe_multiple);
5604 else if (fmt[i] == 'E')
5605 for (j = 0; j < XVECLEN (x, i); j++)
5606 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5607 maybe_multiple);
5608 }
5609 \f
5610 /* Fill in the data about one biv update.
5611 V is the `struct induction' in which we record the biv. (It is
5612 allocated by the caller, with xmalloc.)
5613 INSN is the insn that sets it.
5614 DEST_REG is the biv's reg.
5615
5616 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5617 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5618 being set to INC_VAL.
5619
5620 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5621 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5622 can be executed more than once per iteration. If MAYBE_MULTIPLE
5623 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5624 executed exactly once per iteration. */
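/* For example, the increment `i = i + 4' is recorded with MULT_VAL ==
   const1_rtx and INC_VAL == (const_int 4), whereas an invariant assignment
   such as `i = 10' is recorded with MULT_VAL == const0_rtx and INC_VAL ==
   (const_int 10).  (The constants are illustrative.)  */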
5625
5626 static void
5627 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5628 not_every_iteration, maybe_multiple)
5629 struct loop *loop;
5630 struct induction *v;
5631 rtx insn;
5632 rtx dest_reg;
5633 rtx inc_val;
5634 rtx mult_val;
5635 rtx *location;
5636 int not_every_iteration;
5637 int maybe_multiple;
5638 {
5639 struct loop_ivs *ivs = LOOP_IVS (loop);
5640 struct iv_class *bl;
5641
5642 v->insn = insn;
5643 v->src_reg = dest_reg;
5644 v->dest_reg = dest_reg;
5645 v->mult_val = mult_val;
5646 v->add_val = inc_val;
5647 v->ext_dependent = NULL_RTX;
5648 v->location = location;
5649 v->mode = GET_MODE (dest_reg);
5650 v->always_computable = ! not_every_iteration;
5651 v->always_executed = ! not_every_iteration;
5652 v->maybe_multiple = maybe_multiple;
5653
5654 /* Add this to the reg's iv_class, creating a class
5655 if this is the first incrementation of the reg. */
5656
5657 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5658 if (bl == 0)
5659 {
5660 /* Create and initialize new iv_class. */
5661
5662 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5663
5664 bl->regno = REGNO (dest_reg);
5665 bl->biv = 0;
5666 bl->giv = 0;
5667 bl->biv_count = 0;
5668 bl->giv_count = 0;
5669
5670 /* Set initial value to the reg itself. */
5671 bl->initial_value = dest_reg;
5672 bl->final_value = 0;
5673 /* We haven't seen the initializing insn yet. */
5674 bl->init_insn = 0;
5675 bl->init_set = 0;
5676 bl->initial_test = 0;
5677 bl->incremented = 0;
5678 bl->eliminable = 0;
5679 bl->nonneg = 0;
5680 bl->reversed = 0;
5681 bl->total_benefit = 0;
5682
5683 /* Add this class to ivs->list. */
5684 bl->next = ivs->list;
5685 ivs->list = bl;
5686
5687 /* Put it in the array of biv register classes. */
5688 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5689 }
5690
5691 /* Update IV_CLASS entry for this biv. */
5692 v->next_iv = bl->biv;
5693 bl->biv = v;
5694 bl->biv_count++;
5695 if (mult_val == const1_rtx)
5696 bl->incremented = 1;
5697
5698 if (loop_dump_stream)
5699 loop_biv_dump (v, loop_dump_stream, 0);
5700 }
5701 \f
5702 /* Fill in the data about one giv.
5703 V is the `struct induction' in which we record the giv. (It is
5704 allocated by the caller, with xmalloc.)
5705 INSN is the insn that sets it.
5706 BENEFIT estimates the savings from deleting this insn.
5707 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5708 into a register or is used as a memory address.
5709
5710 SRC_REG is the biv reg which the giv is computed from.
5711 DEST_REG is the giv's reg (if the giv is stored in a reg).
5712 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5713 LOCATION points to the place where this giv's value appears in INSN. */
5714
5715 static void
5716 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5717 benefit, type, not_every_iteration, maybe_multiple, location)
5718 const struct loop *loop;
5719 struct induction *v;
5720 rtx insn;
5721 rtx src_reg;
5722 rtx dest_reg;
5723 rtx mult_val, add_val, ext_val;
5724 int benefit;
5725 enum g_types type;
5726 int not_every_iteration, maybe_multiple;
5727 rtx *location;
5728 {
5729 struct loop_ivs *ivs = LOOP_IVS (loop);
5730 struct induction *b;
5731 struct iv_class *bl;
5732 rtx set = single_set (insn);
5733 rtx temp;
5734
5735 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5736 undo the MULT canonicalization that we performed earlier. */
5737 temp = simplify_rtx (add_val);
5738 if (temp
5739 && ! (GET_CODE (add_val) == MULT
5740 && GET_CODE (temp) == ASHIFT))
5741 add_val = temp;
5742
5743 v->insn = insn;
5744 v->src_reg = src_reg;
5745 v->giv_type = type;
5746 v->dest_reg = dest_reg;
5747 v->mult_val = mult_val;
5748 v->add_val = add_val;
5749 v->ext_dependent = ext_val;
5750 v->benefit = benefit;
5751 v->location = location;
5752 v->cant_derive = 0;
5753 v->combined_with = 0;
5754 v->maybe_multiple = maybe_multiple;
5755 v->maybe_dead = 0;
5756 v->derive_adjustment = 0;
5757 v->same = 0;
5758 v->ignore = 0;
5759 v->new_reg = 0;
5760 v->final_value = 0;
5761 v->same_insn = 0;
5762 v->auto_inc_opt = 0;
5763 v->unrolled = 0;
5764 v->shared = 0;
5765
5766 /* The v->always_computable field is used in update_giv_derive, to
5767 determine whether a giv can be used to derive another giv. For a
5768 DEST_REG giv, INSN computes a new value for the giv, so its value
5769 isn't computable if INSN isn't executed every iteration.
5770 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5771 it does not compute a new value. Hence the value is always computable
5772 regardless of whether INSN is executed each iteration. */
5773
5774 if (type == DEST_ADDR)
5775 v->always_computable = 1;
5776 else
5777 v->always_computable = ! not_every_iteration;
5778
5779 v->always_executed = ! not_every_iteration;
5780
5781 if (type == DEST_ADDR)
5782 {
5783 v->mode = GET_MODE (*location);
5784 v->lifetime = 1;
5785 }
5786 else /* type == DEST_REG */
5787 {
5788 v->mode = GET_MODE (SET_DEST (set));
5789
5790 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5791
5792 /* If the lifetime is zero, it means that this register is
5793 really a dead store. So mark this as a giv that can be
5794 ignored. This will not prevent the biv from being eliminated. */
5795 if (v->lifetime == 0)
5796 v->ignore = 1;
5797
5798 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5799 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5800 }
5801
5802 /* Add the giv to the class of givs computed from one biv. */
5803
5804 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5805 if (bl)
5806 {
5807 v->next_iv = bl->giv;
5808 bl->giv = v;
5809 /* Don't count DEST_ADDR. This is supposed to count the number of
5810 insns that calculate givs. */
5811 if (type == DEST_REG)
5812 bl->giv_count++;
5813 bl->total_benefit += benefit;
5814 }
5815 else
5816 /* Fatal error, biv missing for this giv? */
5817 abort ();
5818
5819 if (type == DEST_ADDR)
5820 v->replaceable = 1;
5821 else
5822 {
5823 /* The giv can be replaced outright by the reduced register only if all
5824 of the following conditions are true:
5825 - the insn that sets the giv is always executed on any iteration
5826 on which the giv is used at all
5827 (there are two ways to deduce this:
5828 either the insn is executed on every iteration,
5829 or all uses follow that insn in the same basic block),
5830 - the giv is not used outside the loop
5831 - no assignments to the biv occur during the giv's lifetime. */
5832
5833 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5834 /* Previous line always fails if INSN was moved by loop opt. */
5835 && REGNO_LAST_LUID (REGNO (dest_reg))
5836 < INSN_LUID (loop->end)
5837 && (! not_every_iteration
5838 || last_use_this_basic_block (dest_reg, insn)))
5839 {
5840 /* Now check that there are no assignments to the biv within the
5841 giv's lifetime. This requires two separate checks. */
5842
5843 /* Check each biv update, and fail if any are between the first
5844 and last use of the giv.
5845
5846 If this loop contains an inner loop that was unrolled, then
5847 the insn modifying the biv may have been emitted by the loop
5848 unrolling code, and hence does not have a valid luid. Just
5849 mark the biv as not replaceable in this case. It is not very
5850 useful as a biv, because it is used in two different loops.
5851 It is very unlikely that we would be able to optimize the giv
5852 using this biv anyways. */
5853
5854 v->replaceable = 1;
5855 for (b = bl->biv; b; b = b->next_iv)
5856 {
5857 if (INSN_UID (b->insn) >= max_uid_for_loop
5858 || ((INSN_LUID (b->insn)
5859 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5860 && (INSN_LUID (b->insn)
5861 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5862 {
5863 v->replaceable = 0;
5864 v->not_replaceable = 1;
5865 break;
5866 }
5867 }
5868
5869 /* If there are any backwards branches that go from after the
5870 biv update to before it, then this giv is not replaceable. */
5871 if (v->replaceable)
5872 for (b = bl->biv; b; b = b->next_iv)
5873 if (back_branch_in_range_p (loop, b->insn))
5874 {
5875 v->replaceable = 0;
5876 v->not_replaceable = 1;
5877 break;
5878 }
5879 }
5880 else
5881 {
5882 /* May still be replaceable, we don't have enough info here to
5883 decide. */
5884 v->replaceable = 0;
5885 v->not_replaceable = 0;
5886 }
5887 }
5888
5889 /* Record whether the add_val contains a const_int, for later use by
5890 combine_givs. */
5891 {
5892 rtx tem = add_val;
5893
5894 v->no_const_addval = 1;
5895 if (tem == const0_rtx)
5896 ;
5897 else if (CONSTANT_P (add_val))
5898 v->no_const_addval = 0;
5899 if (GET_CODE (tem) == PLUS)
5900 {
5901 while (1)
5902 {
5903 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5904 tem = XEXP (tem, 0);
5905 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5906 tem = XEXP (tem, 1);
5907 else
5908 break;
5909 }
5910 if (CONSTANT_P (XEXP (tem, 1)))
5911 v->no_const_addval = 0;
5912 }
5913 }
5914
5915 if (loop_dump_stream)
5916 loop_giv_dump (v, loop_dump_stream, 0);
5917 }
5918
5919 /* All this does is determine whether a giv can be made replaceable because
5920 its final value can be calculated. This code can not be part of record_giv
5921 above, because final_giv_value requires that the number of loop iterations
5922 be known, and that can not be accurately calculated until after all givs
5923 have been identified. */
5924
5925 static void
5926 check_final_value (loop, v)
5927 const struct loop *loop;
5928 struct induction *v;
5929 {
5930 struct loop_ivs *ivs = LOOP_IVS (loop);
5931 struct iv_class *bl;
5932 rtx final_value = 0;
5933
5934 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5935
5936 /* DEST_ADDR givs will never reach here, because they are always marked
5937 replaceable above in record_giv. */
5938
5939 /* The giv can be replaced outright by the reduced register only if all
5940 of the following conditions are true:
5941 - the insn that sets the giv is always executed on any iteration
5942 on which the giv is used at all
5943 (there are two ways to deduce this:
5944 either the insn is executed on every iteration,
5945 or all uses follow that insn in the same basic block),
5946 - its final value can be calculated (this condition is different
5947 than the one above in record_giv)
5948 - it's not used before it's set
5949 - no assignments to the biv occur during the giv's lifetime. */
5950
5951 #if 0
5952 /* This is only called now when replaceable is known to be false. */
5953 /* Clear replaceable, so that it won't confuse final_giv_value. */
5954 v->replaceable = 0;
5955 #endif
5956
5957 if ((final_value = final_giv_value (loop, v))
5958 && (v->always_executed
5959 || last_use_this_basic_block (v->dest_reg, v->insn)))
5960 {
5961 int biv_increment_seen = 0, before_giv_insn = 0;
5962 rtx p = v->insn;
5963 rtx last_giv_use;
5964
5965 v->replaceable = 1;
5966
5967 /* When trying to determine whether or not a biv increment occurs
5968 during the lifetime of the giv, we can ignore uses of the variable
5969 outside the loop because final_value is true. Hence we can not
5970 use regno_last_uid and regno_first_uid as above in record_giv. */
5971
5972 /* Search the loop to determine whether any assignments to the
5973 biv occur during the giv's lifetime. Start with the insn
5974 that sets the giv, and search around the loop until we come
5975 back to that insn again.
5976
5977 Also fail if there is a jump within the giv's lifetime that jumps
5978 to somewhere outside the lifetime but still within the loop. This
5979 catches spaghetti code where the execution order is not linear, and
5980 hence the above test fails. Here we assume that the giv lifetime
5981 does not extend from one iteration of the loop to the next, so as
5982 to make the test easier. Since the lifetime isn't known yet,
5983 this requires two loops. See also record_giv above. */
5984
5985 last_giv_use = v->insn;
5986
5987 while (1)
5988 {
5989 p = NEXT_INSN (p);
5990 if (p == loop->end)
5991 {
5992 before_giv_insn = 1;
5993 p = NEXT_INSN (loop->start);
5994 }
5995 if (p == v->insn)
5996 break;
5997
5998 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5999 || GET_CODE (p) == CALL_INSN)
6000 {
6001 /* It is possible for the BIV increment to use the GIV if we
6002 have a cycle. Thus we must be sure to check each insn for
6003 both BIV and GIV uses, and we must check for BIV uses
6004 first. */
6005
6006 if (! biv_increment_seen
6007 && reg_set_p (v->src_reg, PATTERN (p)))
6008 biv_increment_seen = 1;
6009
6010 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6011 {
6012 if (biv_increment_seen || before_giv_insn)
6013 {
6014 v->replaceable = 0;
6015 v->not_replaceable = 1;
6016 break;
6017 }
6018 last_giv_use = p;
6019 }
6020 }
6021 }
6022
6023 /* Now that the lifetime of the giv is known, check for branches
6024 from within the lifetime to outside the lifetime if it is still
6025 replaceable. */
6026
6027 if (v->replaceable)
6028 {
6029 p = v->insn;
6030 while (1)
6031 {
6032 p = NEXT_INSN (p);
6033 if (p == loop->end)
6034 p = NEXT_INSN (loop->start);
6035 if (p == last_giv_use)
6036 break;
6037
6038 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6039 && LABEL_NAME (JUMP_LABEL (p))
6040 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6041 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6042 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6043 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6044 {
6045 v->replaceable = 0;
6046 v->not_replaceable = 1;
6047
6048 if (loop_dump_stream)
6049 fprintf (loop_dump_stream,
6050 "Found branch outside giv lifetime.\n");
6051
6052 break;
6053 }
6054 }
6055 }
6056
6057 /* If it is replaceable, then save the final value. */
6058 if (v->replaceable)
6059 v->final_value = final_value;
6060 }
6061
6062 if (loop_dump_stream && v->replaceable)
6063 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6064 INSN_UID (v->insn), REGNO (v->dest_reg));
6065 }
6066 \f
6067 /* Update the status of whether a giv can derive other givs.
6068
6069 We need to do something special if there is or may be an update to the biv
6070 between the time the giv is defined and the time it is used to derive
6071 another giv.
6072
6073 In addition, a giv that is only conditionally set is not allowed to
6074 derive another giv once a label has been passed.
6075
6076 The cases we look at are when a label or an update to a biv is passed. */
6077
6078 static void
6079 update_giv_derive (loop, p)
6080 const struct loop *loop;
6081 rtx p;
6082 {
6083 struct loop_ivs *ivs = LOOP_IVS (loop);
6084 struct iv_class *bl;
6085 struct induction *biv, *giv;
6086 rtx tem;
6087 int dummy;
6088
6089 /* Search all IV classes, then all bivs, and finally all givs.
6090
6091 There are three cases we are concerned with. First we have the situation
6092 of a giv that is only updated conditionally. In that case, it may not
6093 derive any givs after a label is passed.
6094
6095 The second case is when a biv update occurs, or may occur, after the
6096 definition of a giv. For certain biv updates (see below) that are
6097 known to occur between the giv definition and use, we can adjust the
6098 giv definition. For others, or when the biv update is conditional,
6099 we must prevent the giv from deriving any other givs. There are two
6100 sub-cases within this case.
6101
6102 If this is a label, we are concerned with any biv update that is done
6103 conditionally, since it may be done after the giv is defined followed by
6104 a branch here (actually, we need to pass both a jump and a label, but
6105 this extra tracking doesn't seem worth it).
6106
6107 If this is a jump, we are concerned about any biv update that may be
6108 executed multiple times. We are actually only concerned about
6109 backward jumps, but it is probably not worth performing the test
6110 on the jump again here.
6111
6112 If this is a biv update, we must adjust the giv status to show that a
6113 subsequent biv update was performed. If this adjustment cannot be done,
6114 the giv cannot derive further givs. */
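  /* As an illustrative case: if a giv was computed as biv * 4 and the biv
     is afterwards incremented by 1 in the same iteration, expressions later
     derived from that giv are off by 1 * 4; that product (biv->add_val *
     giv->mult_val) is what derive_adjustment accumulates below.  */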
6115
6116 for (bl = ivs->list; bl; bl = bl->next)
6117 for (biv = bl->biv; biv; biv = biv->next_iv)
6118 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6119 || biv->insn == p)
6120 {
6121 for (giv = bl->giv; giv; giv = giv->next_iv)
6122 {
6123 /* If cant_derive is already true, there is no point in
6124 checking all of these conditions again. */
6125 if (giv->cant_derive)
6126 continue;
6127
6128 /* If this giv is conditionally set and we have passed a label,
6129 it cannot derive anything. */
6130 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6131 giv->cant_derive = 1;
6132
6133 /* Skip givs that have mult_val == 0, since
6134 they are really invariants. Also skip those that are
6135 replaceable, since we know their lifetime doesn't contain
6136 any biv update. */
6137 else if (giv->mult_val == const0_rtx || giv->replaceable)
6138 continue;
6139
6140 /* The only way we can allow this giv to derive another
6141 is if this is a biv increment and we can form the product
6142 of biv->add_val and giv->mult_val. In this case, we will
6143 be able to compute a compensation. */
6144 else if (biv->insn == p)
6145 {
6146 rtx ext_val_dummy;
6147
6148 tem = 0;
6149 if (biv->mult_val == const1_rtx)
6150 tem = simplify_giv_expr (loop,
6151 gen_rtx_MULT (giv->mode,
6152 biv->add_val,
6153 giv->mult_val),
6154 &ext_val_dummy, &dummy);
6155
6156 if (tem && giv->derive_adjustment)
6157 tem = simplify_giv_expr
6158 (loop,
6159 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6160 &ext_val_dummy, &dummy);
6161
6162 if (tem)
6163 giv->derive_adjustment = tem;
6164 else
6165 giv->cant_derive = 1;
6166 }
6167 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6168 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6169 giv->cant_derive = 1;
6170 }
6171 }
6172 }
6173 \f
6174 /* Check whether an insn is an increment legitimate for a basic induction var.
6175 X is the source of insn P, or a part of it.
6176 MODE is the mode in which X should be interpreted.
6177
6178 DEST_REG is the putative biv, also the destination of the insn.
6179 We accept patterns of these forms:
6180 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6181 REG = INVARIANT + REG
6182
6183 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6184 store the additive term into *INC_VAL, and store the place where
6185 we found the additive term into *LOCATION.
6186
6187 If X is an assignment of an invariant into DEST_REG, we set
6188 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6189
6190 We also want to detect a BIV when it corresponds to a variable
6191 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6192 of the variable may be a PLUS that adds a SUBREG of that variable to
6193 an invariant and then sign- or zero-extends the result of the PLUS
6194 into the variable.
6195
6196 Most GIVs in such cases will be in the promoted mode, since that is
6197 probably the natural computation mode (and almost certainly the mode
6198 used for addresses) on the machine. So we view the pseudo-reg containing
6199 the variable as the BIV, as if it were simply incremented.
6200
6201 Note that treating the entire pseudo as a BIV will result in making
6202 simple increments to any GIVs based on it. However, if the variable
6203 overflows in its declared mode but not its promoted mode, the result will
6204 be incorrect. This is acceptable if the variable is signed, since
6205 overflows in such cases are undefined, but not if it is unsigned, since
6206 those overflows are defined. So we only check for SIGN_EXTEND and
6207 not ZERO_EXTEND.
6208
6209 If we cannot find a biv, we return 0. */
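/* As an illustration (register number and step chosen arbitrarily): if the
   single_set source of an insn is (plus:SI (reg:SI 64) (const_int 4)) and
   (reg:SI 64) is DEST_REG, the PLUS case below sets *MULT_VAL to const1_rtx,
   *INC_VAL to (const_int 4), and *LOCATION to the location of that
   invariant operand.  */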
6210
6211 static int
6212 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6213 const struct loop *loop;
6214 rtx x;
6215 enum machine_mode mode;
6216 rtx dest_reg;
6217 rtx p;
6218 rtx *inc_val;
6219 rtx *mult_val;
6220 rtx **location;
6221 {
6222 enum rtx_code code;
6223 rtx *argp, arg;
6224 rtx insn, set = 0;
6225
6226 code = GET_CODE (x);
6227 *location = NULL;
6228 switch (code)
6229 {
6230 case PLUS:
6231 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6232 || (GET_CODE (XEXP (x, 0)) == SUBREG
6233 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6234 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6235 {
6236 argp = &XEXP (x, 1);
6237 }
6238 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6239 || (GET_CODE (XEXP (x, 1)) == SUBREG
6240 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6241 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6242 {
6243 argp = &XEXP (x, 0);
6244 }
6245 else
6246 return 0;
6247
6248 arg = *argp;
6249 if (loop_invariant_p (loop, arg) != 1)
6250 return 0;
6251
6252 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6253 *mult_val = const1_rtx;
6254 *location = argp;
6255 return 1;
6256
6257 case SUBREG:
6258 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6259 handle addition of promoted variables.
6260 ??? The comment at the start of this function is wrong: promoted
6261 variable increments don't look like it says they do. */
6262 return basic_induction_var (loop, SUBREG_REG (x),
6263 GET_MODE (SUBREG_REG (x)),
6264 dest_reg, p, inc_val, mult_val, location);
6265
6266 case REG:
6267 /* If this register is assigned in a previous insn, look at its
6268 source, but don't go outside the loop or past a label. */
6269
6270 /* If this sets a register to itself, we would repeat any previous
6271 biv increment if we applied this strategy blindly. */
6272 if (rtx_equal_p (dest_reg, x))
6273 return 0;
6274
6275 insn = p;
6276 while (1)
6277 {
6278 rtx dest;
6279 do
6280 {
6281 insn = PREV_INSN (insn);
6282 }
6283 while (insn && GET_CODE (insn) == NOTE
6284 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6285
6286 if (!insn)
6287 break;
6288 set = single_set (insn);
6289 if (set == 0)
6290 break;
6291 dest = SET_DEST (set);
6292 if (dest == x
6293 || (GET_CODE (dest) == SUBREG
6294 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6295 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6296 && SUBREG_REG (dest) == x))
6297 return basic_induction_var (loop, SET_SRC (set),
6298 (GET_MODE (SET_SRC (set)) == VOIDmode
6299 ? GET_MODE (x)
6300 : GET_MODE (SET_SRC (set))),
6301 dest_reg, insn,
6302 inc_val, mult_val, location);
6303
6304 while (GET_CODE (dest) == SIGN_EXTRACT
6305 || GET_CODE (dest) == ZERO_EXTRACT
6306 || GET_CODE (dest) == SUBREG
6307 || GET_CODE (dest) == STRICT_LOW_PART)
6308 dest = XEXP (dest, 0);
6309 if (dest == x)
6310 break;
6311 }
6312 /* Fall through. */
6313
6314 /* Can accept constant setting of biv only when inside the innermost loop.
6315 Otherwise, a biv of an inner loop may be incorrectly recognized
6316 as a biv of the outer loop,
6317 causing code to be moved INTO the inner loop. */
6318 case MEM:
6319 if (loop_invariant_p (loop, x) != 1)
6320 return 0;
6321 case CONST_INT:
6322 case SYMBOL_REF:
6323 case CONST:
6324 /* convert_modes aborts if we try to convert to or from CCmode, so just
6325 exclude that case. It is very unlikely that a condition code value
6326 would be a useful iterator anyways. convert_modes aborts if we try to
6327 convert a float mode to non-float or vice versa too. */
6328 if (loop->level == 1
6329 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6330 && GET_MODE_CLASS (mode) != MODE_CC)
6331 {
6332 /* Possible bug here? Perhaps we don't know the mode of X. */
6333 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6334 *mult_val = const0_rtx;
6335 return 1;
6336 }
6337 else
6338 return 0;
6339
6340 case SIGN_EXTEND:
6341 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6342 dest_reg, p, inc_val, mult_val, location);
6343
6344 case ASHIFTRT:
6345 /* Similar, since this can be a sign extension. */
6346 for (insn = PREV_INSN (p);
6347 (insn && GET_CODE (insn) == NOTE
6348 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6349 insn = PREV_INSN (insn))
6350 ;
6351
6352 if (insn)
6353 set = single_set (insn);
6354
6355 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6356 && set && SET_DEST (set) == XEXP (x, 0)
6357 && GET_CODE (XEXP (x, 1)) == CONST_INT
6358 && INTVAL (XEXP (x, 1)) >= 0
6359 && GET_CODE (SET_SRC (set)) == ASHIFT
6360 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6361 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6362 GET_MODE (XEXP (x, 0)),
6363 dest_reg, insn, inc_val, mult_val,
6364 location);
6365 return 0;
6366
6367 default:
6368 return 0;
6369 }
6370 }
6371 \f
6372 /* A general induction variable (giv) is any quantity that is a linear
6373 function of a basic induction variable,
6374 i.e. giv = biv * mult_val + add_val.
6375 The coefficients can be any loop invariant quantity.
6376 A giv need not be computed directly from the biv;
6377 it can be computed by way of other givs. */
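/* For instance, if `i' is a biv and `j = i * 4' is a giv, then `k = j + 16'
   is also a giv, equal to i * 4 + 16 (mult_val 4, add_val 16).  The numbers
   are illustrative only.  */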
6378
6379 /* Determine whether X computes a giv.
6380 If it does, return a nonzero value
6381 which is the benefit from eliminating the computation of X;
6382 set *SRC_REG to the register of the biv that it is computed from;
6383 set *ADD_VAL and *MULT_VAL to the coefficients,
6384 such that the value of X is biv * mult + add; */
6385
6386 static int
6387 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6388 is_addr, pbenefit, addr_mode)
6389 const struct loop *loop;
6390 rtx x;
6391 rtx *src_reg;
6392 rtx *add_val;
6393 rtx *mult_val;
6394 rtx *ext_val;
6395 int is_addr;
6396 int *pbenefit;
6397 enum machine_mode addr_mode;
6398 {
6399 struct loop_ivs *ivs = LOOP_IVS (loop);
6400 rtx orig_x = x;
6401
6402 /* If this is an invariant, forget it, it isn't a giv. */
6403 if (loop_invariant_p (loop, x) == 1)
6404 return 0;
6405
6406 *pbenefit = 0;
6407 *ext_val = NULL_RTX;
6408 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6409 if (x == 0)
6410 return 0;
6411
6412 switch (GET_CODE (x))
6413 {
6414 case USE:
6415 case CONST_INT:
6416 /* Since this is now an invariant and wasn't before, it must be a giv
6417 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6418 with. */
6419 *src_reg = ivs->list->biv->dest_reg;
6420 *mult_val = const0_rtx;
6421 *add_val = x;
6422 break;
6423
6424 case REG:
6425 /* This is equivalent to a BIV. */
6426 *src_reg = x;
6427 *mult_val = const1_rtx;
6428 *add_val = const0_rtx;
6429 break;
6430
6431 case PLUS:
6432 /* Either (plus (biv) (invar)) or
6433 (plus (mult (biv) (invar_1)) (invar_2)). */
6434 if (GET_CODE (XEXP (x, 0)) == MULT)
6435 {
6436 *src_reg = XEXP (XEXP (x, 0), 0);
6437 *mult_val = XEXP (XEXP (x, 0), 1);
6438 }
6439 else
6440 {
6441 *src_reg = XEXP (x, 0);
6442 *mult_val = const1_rtx;
6443 }
6444 *add_val = XEXP (x, 1);
6445 break;
6446
6447 case MULT:
6448 /* ADD_VAL is zero. */
6449 *src_reg = XEXP (x, 0);
6450 *mult_val = XEXP (x, 1);
6451 *add_val = const0_rtx;
6452 break;
6453
6454 default:
6455 abort ();
6456 }
6457
6458 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6459 unless they are CONST_INT). */
6460 if (GET_CODE (*add_val) == USE)
6461 *add_val = XEXP (*add_val, 0);
6462 if (GET_CODE (*mult_val) == USE)
6463 *mult_val = XEXP (*mult_val, 0);
6464
6465 if (is_addr)
6466 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6467 else
6468 *pbenefit += rtx_cost (orig_x, SET);
6469
6470 /* Always return true if this is a giv so it will be detected as such,
6471 even if the benefit is zero or negative. This allows elimination
6472 of bivs that might otherwise not be eliminated. */
6473 return 1;
6474 }
6475 \f
6476 /* Given an expression, X, try to form it as a linear function of a biv.
6477 We will canonicalize it to be of the form
6478 (plus (mult (BIV) (invar_1))
6479 (invar_2))
6480 with possible degeneracies.
6481
6482 The invariant expressions must each be of a form that can be used as a
6483 machine operand. We surround them with a USE rtx (a hack, but localized
6484 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6485 routine; it is the caller's responsibility to strip them.
6486
6487 If no such canonicalization is possible (i.e., two biv's are used or an
6488 expression that is neither invariant nor a biv or giv), this routine
6489 returns 0.
6490
6491 For a non-zero return, the result will have a code of CONST_INT, USE,
6492 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6493
6494 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
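/* For example (constants chosen for illustration), an expression such as
   (i + 3) * 4 with `i' a biv is rewritten here by distribution and
   association into (plus (mult i 4) 12), the canonical form above.  */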
6495
6496 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6497 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6498
6499 static rtx
6500 simplify_giv_expr (loop, x, ext_val, benefit)
6501 const struct loop *loop;
6502 rtx x;
6503 rtx *ext_val;
6504 int *benefit;
6505 {
6506 struct loop_ivs *ivs = LOOP_IVS (loop);
6507 struct loop_regs *regs = LOOP_REGS (loop);
6508 enum machine_mode mode = GET_MODE (x);
6509 rtx arg0, arg1;
6510 rtx tem;
6511
6512 /* If this is not an integer mode, or if we cannot do arithmetic in this
6513 mode, this can't be a giv. */
6514 if (mode != VOIDmode
6515 && (GET_MODE_CLASS (mode) != MODE_INT
6516 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6517 return NULL_RTX;
6518
6519 switch (GET_CODE (x))
6520 {
6521 case PLUS:
6522 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6523 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6524 if (arg0 == 0 || arg1 == 0)
6525 return NULL_RTX;
6526
6527 /* Put constant last, CONST_INT last if both constant. */
6528 if ((GET_CODE (arg0) == USE
6529 || GET_CODE (arg0) == CONST_INT)
6530 && ! ((GET_CODE (arg0) == USE
6531 && GET_CODE (arg1) == USE)
6532 || GET_CODE (arg1) == CONST_INT))
6533 tem = arg0, arg0 = arg1, arg1 = tem;
6534
6535 /* Handle addition of zero, then addition of an invariant. */
6536 if (arg1 == const0_rtx)
6537 return arg0;
6538 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6539 switch (GET_CODE (arg0))
6540 {
6541 case CONST_INT:
6542 case USE:
6543 /* Adding two invariants must result in an invariant, so enclose
6544 addition operation inside a USE and return it. */
6545 if (GET_CODE (arg0) == USE)
6546 arg0 = XEXP (arg0, 0);
6547 if (GET_CODE (arg1) == USE)
6548 arg1 = XEXP (arg1, 0);
6549
6550 if (GET_CODE (arg0) == CONST_INT)
6551 tem = arg0, arg0 = arg1, arg1 = tem;
6552 if (GET_CODE (arg1) == CONST_INT)
6553 tem = sge_plus_constant (arg0, arg1);
6554 else
6555 tem = sge_plus (mode, arg0, arg1);
6556
6557 if (GET_CODE (tem) != CONST_INT)
6558 tem = gen_rtx_USE (mode, tem);
6559 return tem;
6560
6561 case REG:
6562 case MULT:
6563 /* biv + invar or mult + invar. Return sum. */
6564 return gen_rtx_PLUS (mode, arg0, arg1);
6565
6566 case PLUS:
6567 /* (a + invar_1) + invar_2. Associate. */
6568 return
6569 simplify_giv_expr (loop,
6570 gen_rtx_PLUS (mode,
6571 XEXP (arg0, 0),
6572 gen_rtx_PLUS (mode,
6573 XEXP (arg0, 1),
6574 arg1)),
6575 ext_val, benefit);
6576
6577 default:
6578 abort ();
6579 }
6580
6581 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6582 MULT to reduce cases. */
6583 if (GET_CODE (arg0) == REG)
6584 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6585 if (GET_CODE (arg1) == REG)
6586 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6587
6588 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6589 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6590 Recurse to associate the second PLUS. */
6591 if (GET_CODE (arg1) == MULT)
6592 tem = arg0, arg0 = arg1, arg1 = tem;
6593
6594 if (GET_CODE (arg1) == PLUS)
6595 return
6596 simplify_giv_expr (loop,
6597 gen_rtx_PLUS (mode,
6598 gen_rtx_PLUS (mode, arg0,
6599 XEXP (arg1, 0)),
6600 XEXP (arg1, 1)),
6601 ext_val, benefit);
6602
6603 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6604 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6605 return NULL_RTX;
6606
6607 if (!rtx_equal_p (arg0, arg1))
6608 return NULL_RTX;
6609
6610 return simplify_giv_expr (loop,
6611 gen_rtx_MULT (mode,
6612 XEXP (arg0, 0),
6613 gen_rtx_PLUS (mode,
6614 XEXP (arg0, 1),
6615 XEXP (arg1, 1))),
6616 ext_val, benefit);
6617
6618 case MINUS:
6619 /* Handle "a - b" as "a + b * (-1)". */
6620 return simplify_giv_expr (loop,
6621 gen_rtx_PLUS (mode,
6622 XEXP (x, 0),
6623 gen_rtx_MULT (mode,
6624 XEXP (x, 1),
6625 constm1_rtx)),
6626 ext_val, benefit);
6627
6628 case MULT:
6629 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6630 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6631 if (arg0 == 0 || arg1 == 0)
6632 return NULL_RTX;
6633
6634 /* Put constant last, CONST_INT last if both constant. */
6635 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6636 && GET_CODE (arg1) != CONST_INT)
6637 tem = arg0, arg0 = arg1, arg1 = tem;
6638
6639 /* If second argument is not now constant, not giv. */
6640 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6641 return NULL_RTX;
6642
6643 /* Handle multiply by 0 or 1. */
6644 if (arg1 == const0_rtx)
6645 return const0_rtx;
6646
6647 else if (arg1 == const1_rtx)
6648 return arg0;
6649
6650 switch (GET_CODE (arg0))
6651 {
6652 case REG:
6653 /* biv * invar. Done. */
6654 return gen_rtx_MULT (mode, arg0, arg1);
6655
6656 case CONST_INT:
6657 /* Product of two constants. */
6658 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6659
6660 case USE:
6661 /* invar * invar is a giv, but attempt to simplify it somehow. */
6662 if (GET_CODE (arg1) != CONST_INT)
6663 return NULL_RTX;
6664
6665 arg0 = XEXP (arg0, 0);
6666 if (GET_CODE (arg0) == MULT)
6667 {
6668 /* (invar_0 * invar_1) * invar_2. Associate. */
6669 return simplify_giv_expr (loop,
6670 gen_rtx_MULT (mode,
6671 XEXP (arg0, 0),
6672 gen_rtx_MULT (mode,
6673 XEXP (arg0,
6674 1),
6675 arg1)),
6676 ext_val, benefit);
6677 }
6678 /* Propagate the MULT expressions to the innermost nodes. */
6679 else if (GET_CODE (arg0) == PLUS)
6680 {
6681 /* (invar_0 + invar_1) * invar_2. Distribute. */
6682 return simplify_giv_expr (loop,
6683 gen_rtx_PLUS (mode,
6684 gen_rtx_MULT (mode,
6685 XEXP (arg0,
6686 0),
6687 arg1),
6688 gen_rtx_MULT (mode,
6689 XEXP (arg0,
6690 1),
6691 arg1)),
6692 ext_val, benefit);
6693 }
6694 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6695
6696 case MULT:
6697 /* (a * invar_1) * invar_2. Associate. */
6698 return simplify_giv_expr (loop,
6699 gen_rtx_MULT (mode,
6700 XEXP (arg0, 0),
6701 gen_rtx_MULT (mode,
6702 XEXP (arg0, 1),
6703 arg1)),
6704 ext_val, benefit);
6705
6706 case PLUS:
6707 /* (a + invar_1) * invar_2. Distribute. */
6708 return simplify_giv_expr (loop,
6709 gen_rtx_PLUS (mode,
6710 gen_rtx_MULT (mode,
6711 XEXP (arg0, 0),
6712 arg1),
6713 gen_rtx_MULT (mode,
6714 XEXP (arg0, 1),
6715 arg1)),
6716 ext_val, benefit);
6717
6718 default:
6719 abort ();
6720 }
6721
6722 case ASHIFT:
6723 /* Shift by constant is multiply by power of two. */
6724 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6725 return 0;
6726
6727 return
6728 simplify_giv_expr (loop,
6729 gen_rtx_MULT (mode,
6730 XEXP (x, 0),
6731 GEN_INT ((HOST_WIDE_INT) 1
6732 << INTVAL (XEXP (x, 1)))),
6733 ext_val, benefit);
6734
6735 case NEG:
6736 /* "-a" is "a * (-1)" */
6737 return simplify_giv_expr (loop,
6738 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6739 ext_val, benefit);
6740
6741 case NOT:
6742 /* "~a" is "-a - 1". Silly, but easy. */
6743 return simplify_giv_expr (loop,
6744 gen_rtx_MINUS (mode,
6745 gen_rtx_NEG (mode, XEXP (x, 0)),
6746 const1_rtx),
6747 ext_val, benefit);
6748
6749 case USE:
6750 /* Already in proper form for invariant. */
6751 return x;
6752
6753 case SIGN_EXTEND:
6754 case ZERO_EXTEND:
6755 case TRUNCATE:
6756 /* Conditionally recognize extensions of simple IVs. After we've
6757 computed loop traversal counts and verified the range of the
6758 source IV, we'll reevaluate this as a GIV. */
6759 if (*ext_val == NULL_RTX)
6760 {
6761 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6762 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6763 {
6764 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6765 return arg0;
6766 }
6767 }
6768 goto do_default;
6769
6770 case REG:
6771 /* If this is a new register, we can't deal with it. */
6772 if (REGNO (x) >= max_reg_before_loop)
6773 return 0;
6774
6775 /* Check for biv or giv. */
6776 switch (REG_IV_TYPE (ivs, REGNO (x)))
6777 {
6778 case BASIC_INDUCT:
6779 return x;
6780 case GENERAL_INDUCT:
6781 {
6782 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6783
6784 /* Form expression from giv and add benefit. Ensure this giv
6785 can derive another and subtract any needed adjustment if so. */
6786
6787 /* Increasing the benefit here is risky. The only case in which it
6788 is arguably correct is if this is the only use of V. In other
6789 cases, this will artificially inflate the benefit of the current
6790 giv, and lead to suboptimal code. Thus, it is disabled, since
6791 potentially not reducing an only marginally beneficial giv is
6792 less harmful than reducing many givs that are not really
6793 beneficial. */
6794 {
6795 rtx single_use = regs->array[REGNO (x)].single_usage;
6796 if (single_use && single_use != const0_rtx)
6797 *benefit += v->benefit;
6798 }
6799
6800 if (v->cant_derive)
6801 return 0;
6802
6803 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6804 v->src_reg, v->mult_val),
6805 v->add_val);
6806
6807 if (v->derive_adjustment)
6808 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6809 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6810 if (*ext_val)
6811 {
6812 if (!v->ext_dependent)
6813 return arg0;
6814 }
6815 else
6816 {
6817 *ext_val = v->ext_dependent;
6818 return arg0;
6819 }
6820 return 0;
6821 }
6822
6823 default:
6824 do_default:
6825 /* If it isn't an induction variable, and it is invariant, we
6826 may be able to simplify things further by looking through
6827 the bits we just moved outside the loop. */
6828 if (loop_invariant_p (loop, x) == 1)
6829 {
6830 struct movable *m;
6831 struct loop_movables *movables = LOOP_MOVABLES (loop);
6832
6833 for (m = movables->head; m; m = m->next)
6834 if (rtx_equal_p (x, m->set_dest))
6835 {
6836 /* Ok, we found a match. Substitute and simplify. */
6837
6838 /* If we match another movable, we must use that, as
6839 this one is going away. */
6840 if (m->match)
6841 return simplify_giv_expr (loop, m->match->set_dest,
6842 ext_val, benefit);
6843
6844 /* If consec is non-zero, this is a member of a group of
6845 instructions that were moved together. We handle this
6846 case only to the point of seeking to the last insn and
6847 looking for a REG_EQUAL. Fail if we don't find one. */
6848 if (m->consec != 0)
6849 {
6850 int i = m->consec;
6851 tem = m->insn;
6852 do
6853 {
6854 tem = NEXT_INSN (tem);
6855 }
6856 while (--i > 0);
6857
6858 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6859 if (tem)
6860 tem = XEXP (tem, 0);
6861 }
6862 else
6863 {
6864 tem = single_set (m->insn);
6865 if (tem)
6866 tem = SET_SRC (tem);
6867 }
6868
6869 if (tem)
6870 {
6871 /* What we are most interested in is pointer
6872 arithmetic on invariants -- only take
6873 patterns we may be able to do something with. */
6874 if (GET_CODE (tem) == PLUS
6875 || GET_CODE (tem) == MULT
6876 || GET_CODE (tem) == ASHIFT
6877 || GET_CODE (tem) == CONST_INT
6878 || GET_CODE (tem) == SYMBOL_REF)
6879 {
6880 tem = simplify_giv_expr (loop, tem, ext_val,
6881 benefit);
6882 if (tem)
6883 return tem;
6884 }
6885 else if (GET_CODE (tem) == CONST
6886 && GET_CODE (XEXP (tem, 0)) == PLUS
6887 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6888 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6889 {
6890 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6891 ext_val, benefit);
6892 if (tem)
6893 return tem;
6894 }
6895 }
6896 break;
6897 }
6898 }
6899 break;
6900 }
6901
6902 /* Fall through to general case. */
6903 default:
6904 /* If invariant, return as USE (unless CONST_INT).
6905 Otherwise, not giv. */
6906 if (GET_CODE (x) == USE)
6907 x = XEXP (x, 0);
6908
6909 if (loop_invariant_p (loop, x) == 1)
6910 {
6911 if (GET_CODE (x) == CONST_INT)
6912 return x;
6913 if (GET_CODE (x) == CONST
6914 && GET_CODE (XEXP (x, 0)) == PLUS
6915 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6916 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6917 x = XEXP (x, 0);
6918 return gen_rtx_USE (mode, x);
6919 }
6920 else
6921 return 0;
6922 }
6923 }
6924
6925 /* This routine folds invariants such that there is only ever one
6926 CONST_INT in the summation. It is only used by simplify_giv_expr. */
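/* For example, folding the constant 4 into (plus R 3), where R is any
   invariant register, yields (plus R 7) rather than the nested
   (plus (plus R 3) 4).  (Operands chosen for illustration.)  */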
6927
6928 static rtx
6929 sge_plus_constant (x, c)
6930 rtx x, c;
6931 {
6932 if (GET_CODE (x) == CONST_INT)
6933 return GEN_INT (INTVAL (x) + INTVAL (c));
6934 else if (GET_CODE (x) != PLUS)
6935 return gen_rtx_PLUS (GET_MODE (x), x, c);
6936 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6937 {
6938 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6939 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6940 }
6941 else if (GET_CODE (XEXP (x, 0)) == PLUS
6942 || GET_CODE (XEXP (x, 1)) != PLUS)
6943 {
6944 return gen_rtx_PLUS (GET_MODE (x),
6945 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6946 }
6947 else
6948 {
6949 return gen_rtx_PLUS (GET_MODE (x),
6950 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6951 }
6952 }
6953
6954 static rtx
6955 sge_plus (mode, x, y)
6956 enum machine_mode mode;
6957 rtx x, y;
6958 {
6959 while (GET_CODE (y) == PLUS)
6960 {
6961 rtx a = XEXP (y, 0);
6962 if (GET_CODE (a) == CONST_INT)
6963 x = sge_plus_constant (x, a);
6964 else
6965 x = gen_rtx_PLUS (mode, x, a);
6966 y = XEXP (y, 1);
6967 }
6968 if (GET_CODE (y) == CONST_INT)
6969 x = sge_plus_constant (x, y);
6970 else
6971 x = gen_rtx_PLUS (mode, x, y);
6972 return x;
6973 }
6974 \f
6975 /* Help detect a giv that is calculated by several consecutive insns;
6976 for example,
6977 giv = biv * M
6978 giv = giv + A
6979 The caller has already identified the first insn P as having a giv as dest;
6980 we check that all other insns that set the same register follow
6981 immediately after P, that they alter nothing else,
6982 and that the result of the last is still a giv.
6983
6984 The value is 0 if the reg set in P is not really a giv.
6985 Otherwise, the value is the amount gained by eliminating
6986 all the consecutive insns that compute the value.
6987
6988 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6989 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6990
6991 The coefficients of the ultimate giv value are stored in
6992 *MULT_VAL and *ADD_VAL. */
6993
6994 static int
6995 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6996 add_val, mult_val, ext_val, last_consec_insn)
6997 const struct loop *loop;
6998 int first_benefit;
6999 rtx p;
7000 rtx src_reg;
7001 rtx dest_reg;
7002 rtx *add_val;
7003 rtx *mult_val;
7004 rtx *ext_val;
7005 rtx *last_consec_insn;
7006 {
7007 struct loop_ivs *ivs = LOOP_IVS (loop);
7008 struct loop_regs *regs = LOOP_REGS (loop);
7009 int count;
7010 enum rtx_code code;
7011 int benefit;
7012 rtx temp;
7013 rtx set;
7014
7015 /* Indicate that this is a giv so that we can update the value produced in
7016 each insn of the multi-insn sequence.
7017
7018 This induction structure will be used only by the call to
7019 general_induction_var below, so we can allocate it on our stack.
7020 If this is a giv, our caller will replace the induct var entry with
7021 a new induction structure. */
7022 struct induction *v;
7023
7024 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7025 return 0;
7026
7027 v = (struct induction *) alloca (sizeof (struct induction));
7028 v->src_reg = src_reg;
7029 v->mult_val = *mult_val;
7030 v->add_val = *add_val;
7031 v->benefit = first_benefit;
7032 v->cant_derive = 0;
7033 v->derive_adjustment = 0;
7034 v->ext_dependent = NULL_RTX;
7035
7036 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7037 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7038
7039 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7040
7041 while (count > 0)
7042 {
7043 p = NEXT_INSN (p);
7044 code = GET_CODE (p);
7045
7046 /* If libcall, skip to end of call sequence. */
7047 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7048 p = XEXP (temp, 0);
7049
7050 if (code == INSN
7051 && (set = single_set (p))
7052 && GET_CODE (SET_DEST (set)) == REG
7053 && SET_DEST (set) == dest_reg
7054 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7055 add_val, mult_val, ext_val, 0,
7056 &benefit, VOIDmode)
7057 /* Giv created by equivalent expression. */
7058 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7059 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7060 add_val, mult_val, ext_val, 0,
7061 &benefit, VOIDmode)))
7062 && src_reg == v->src_reg)
7063 {
7064 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7065 benefit += libcall_benefit (p);
7066
7067 count--;
7068 v->mult_val = *mult_val;
7069 v->add_val = *add_val;
7070 v->benefit += benefit;
7071 }
7072 else if (code != NOTE)
7073 {
7074 /* Allow insns that set something other than this giv to a
7075 constant. Such insns are needed on machines which cannot
7076 include long constants and should not disqualify a giv. */
7077 if (code == INSN
7078 && (set = single_set (p))
7079 && SET_DEST (set) != dest_reg
7080 && CONSTANT_P (SET_SRC (set)))
7081 continue;
7082
7083 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7084 return 0;
7085 }
7086 }
7087
7088 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7089 *last_consec_insn = p;
7090 return v->benefit;
7091 }
7092 \f
7093 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7094 represented by G1. If no such expression can be found, or it is clear that
7095 it cannot possibly be a valid address, 0 is returned.
7096
7097 To perform the computation, we note that
7098 G1 = x * v + a and
7099 G2 = y * v + b
7100 where `v' is the biv.
7101
7102 So G2 = (y/x) * G1 + (b - a*y/x).
7103
7104 Note that MULT = y/x.
7105
7106 Update: A and B are now allowed to be additive expressions such that
7107 B contains all variables in A. That is, computing B-A will not require
7108 subtracting variables. */
7109
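/* As a purely illustrative example of the arithmetic above: if
   G1 = 2*v + 4 and G2 = 6*v + 20, then MULT = 3 and G2 = 3*G1 + 8.  */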
7110 static rtx
7111 express_from_1 (a, b, mult)
7112 rtx a, b, mult;
7113 {
7114 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7115
7116 if (mult == const0_rtx)
7117 return b;
7118
7119 /* If MULT is not 1, we cannot handle A with non-constants, since we
7120 would then be required to subtract multiples of the registers in A.
7121 This is theoretically possible, and may even apply to some Fortran
7122 constructs, but it is a lot of work and we do not attempt it here. */
7123
7124 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7125 return NULL_RTX;
7126
7127 /* In general these structures are sorted top to bottom (down the PLUS
7128 chain), but not left to right across the PLUS. If B is a higher
7129 order giv than A, we can strip one level and recurse. If A is higher
7130 order, we'll eventually bail out, but won't know that until the end.
7131 If they are the same, we'll strip one level around this loop. */
7132
7133 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7134 {
7135 rtx ra, rb, oa, ob, tmp;
7136
7137 ra = XEXP (a, 0), oa = XEXP (a, 1);
7138 if (GET_CODE (ra) == PLUS)
7139 tmp = ra, ra = oa, oa = tmp;
7140
7141 rb = XEXP (b, 0), ob = XEXP (b, 1);
7142 if (GET_CODE (rb) == PLUS)
7143 tmp = rb, rb = ob, ob = tmp;
7144
7145 if (rtx_equal_p (ra, rb))
7146 /* We matched: remove one reg completely. */
7147 a = oa, b = ob;
7148 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7149 /* An alternate match. */
7150 a = oa, b = rb;
7151 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7152 /* An alternate match. */
7153 a = ra, b = ob;
7154 else
7155 {
7156 /* Indicates an extra register in B. Strip one level from B and
7157 recurse, hoping B was the higher order expression. */
7158 ob = express_from_1 (a, ob, mult);
7159 if (ob == NULL_RTX)
7160 return NULL_RTX;
7161 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7162 }
7163 }
7164
7165 /* Here we are at the last level of A, go through the cases hoping to
7166 get rid of everything but a constant. */
7167
7168 if (GET_CODE (a) == PLUS)
7169 {
7170 rtx ra, oa;
7171
7172 ra = XEXP (a, 0), oa = XEXP (a, 1);
7173 if (rtx_equal_p (oa, b))
7174 oa = ra;
7175 else if (!rtx_equal_p (ra, b))
7176 return NULL_RTX;
7177
7178 if (GET_CODE (oa) != CONST_INT)
7179 return NULL_RTX;
7180
7181 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7182 }
7183 else if (GET_CODE (a) == CONST_INT)
7184 {
7185 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7186 }
7187 else if (CONSTANT_P (a))
7188 {
7189 enum machine_mode mode_a = GET_MODE (a);
7190 enum machine_mode mode_b = GET_MODE (b);
7191 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7192 return simplify_gen_binary (MINUS, mode, b, a);
7193 }
7194 else if (GET_CODE (b) == PLUS)
7195 {
7196 if (rtx_equal_p (a, XEXP (b, 0)))
7197 return XEXP (b, 1);
7198 else if (rtx_equal_p (a, XEXP (b, 1)))
7199 return XEXP (b, 0);
7200 else
7201 return NULL_RTX;
7202 }
7203 else if (rtx_equal_p (a, b))
7204 return const0_rtx;
7205
7206 return NULL_RTX;
7207 }
7208
7209 rtx
7210 express_from (g1, g2)
7211 struct induction *g1, *g2;
7212 {
7213 rtx mult, add;
7214
7215 /* The value that G1 will be multiplied by must be a constant integer. Also,
7216 the only chance we have of getting a valid address is if y/x (see the
7217 notation above) is also an integer. */
7218 if (GET_CODE (g1->mult_val) == CONST_INT
7219 && GET_CODE (g2->mult_val) == CONST_INT)
7220 {
7221 if (g1->mult_val == const0_rtx
7222 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7223 return NULL_RTX;
7224 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7225 }
7226 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7227 mult = const1_rtx;
7228 else
7229 {
7230 /* ??? Find out if one is a multiple of the other? */
7231 return NULL_RTX;
7232 }
7233
7234 add = express_from_1 (g1->add_val, g2->add_val, mult);
7235 if (add == NULL_RTX)
7236 {
7237 /* Failed. If we've got a multiplication factor between G1 and G2,
7238 scale G1's addend and try again. */
7239 if (INTVAL (mult) > 1)
7240 {
7241 rtx g1_add_val = g1->add_val;
7242 if (GET_CODE (g1_add_val) == MULT
7243 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7244 {
7245 HOST_WIDE_INT m;
7246 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7247 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7248 XEXP (g1_add_val, 0), GEN_INT (m));
7249 }
7250 else
7251 {
7252 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7253 mult);
7254 }
7255
7256 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7257 }
7258 }
7259 if (add == NULL_RTX)
7260 return NULL_RTX;
7261
7262 /* Form simplified final result. */
7263 if (mult == const0_rtx)
7264 return add;
7265 else if (mult == const1_rtx)
7266 mult = g1->dest_reg;
7267 else
7268 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7269
7270 if (add == const0_rtx)
7271 return mult;
7272 else
7273 {
7274 if (GET_CODE (add) == PLUS
7275 && CONSTANT_P (XEXP (add, 1)))
7276 {
7277 rtx tem = XEXP (add, 1);
7278 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7279 add = tem;
7280 }
7281
7282 return gen_rtx_PLUS (g2->mode, mult, add);
7283 }
7284 }
7285 \f
7286 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7287 represented by G1. This indicates that G2 should be combined with G1 and
7288 that G2 can use (either directly or via an address expression) a register
7289 used to represent G1. */
7290
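/* For instance (illustrative only), if G1's reduced register holds 4*i and
   G2 is the address giv 4*i + 32, the returned rtx would be roughly
   (plus (reg G1) (const_int 32)), letting G2's memory reference be formed
   from G1's register plus a displacement.  */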
7291 static rtx
7292 combine_givs_p (g1, g2)
7293 struct induction *g1, *g2;
7294 {
7295 rtx comb, ret;
7296
7297 /* With the introduction of ext dependent givs, we must be careful about modes.
7298 G2 must not use a wider mode than G1. */
7299 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7300 return NULL_RTX;
7301
7302 ret = comb = express_from (g1, g2);
7303 if (comb == NULL_RTX)
7304 return NULL_RTX;
7305 if (g1->mode != g2->mode)
7306 ret = gen_lowpart (g2->mode, comb);
7307
7308 /* If these givs are identical, they can be combined. We use the results
7309 of express_from because the addends are not in a canonical form, so
7310 rtx_equal_p is a weaker test. */
7311 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7312 combination to be the other way round. */
7313 if (comb == g1->dest_reg
7314 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7315 {
7316 return ret;
7317 }
7318
7319 /* If G2 can be expressed as a function of G1 and that function is valid
7320 as an address and no more expensive than using a register for G2,
7321 the expression of G2 in terms of G1 can be used. */
7322 if (ret != NULL_RTX
7323 && g2->giv_type == DEST_ADDR
7324 && memory_address_p (GET_MODE (g2->mem), ret)
7325 /* ??? Loses, especially with -fforce-addr, where *g2->location
7326 will always be a register, and so anything more complicated
7327 gets discarded. */
7328 #if 0
7329 #ifdef ADDRESS_COST
7330 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7331 #else
7332 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7333 #endif
7334 #endif
7335 )
7336 {
7337 return ret;
7338 }
7339
7340 return NULL_RTX;
7341 }
7342 \f
7343 /* Check each extension dependent giv in this class to see if its
7344 root biv is safe from wrapping in the interior mode, which would
7345 make the giv illegal. */
7346
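/* For instance (illustrative numbers), a QImode biv starting at 0 and
   incremented by 1 for 100 iterations ends at 100, within both the signed
   and unsigned ranges of QImode, so sign- and zero-extended givs are safe;
   with 200 iterations only zero extension is safe, and with 300 the biv
   wraps and neither extension is.  */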
7347 static void
7348 check_ext_dependent_givs (bl, loop_info)
7349 struct iv_class *bl;
7350 struct loop_info *loop_info;
7351 {
7352 int ze_ok = 0, se_ok = 0, info_ok = 0;
7353 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7354 HOST_WIDE_INT start_val;
7355 unsigned HOST_WIDE_INT u_end_val = 0;
7356 unsigned HOST_WIDE_INT u_start_val = 0;
7357 rtx incr = pc_rtx;
7358 struct induction *v;
7359
7360 /* Make sure the iteration data is available. We must have
7361 constants in order to be certain of no overflow. */
7362 /* ??? An unknown iteration count with an increment of +-1
7363 combined with friendly exit tests against an invariant
7364 value is also amenable to optimization. Not implemented. */
7365 if (loop_info->n_iterations > 0
7366 && bl->initial_value
7367 && GET_CODE (bl->initial_value) == CONST_INT
7368 && (incr = biv_total_increment (bl))
7369 && GET_CODE (incr) == CONST_INT
7370 /* Make sure the host can represent the arithmetic. */
7371 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7372 {
7373 unsigned HOST_WIDE_INT abs_incr, total_incr;
7374 HOST_WIDE_INT s_end_val;
7375 int neg_incr;
7376
7377 info_ok = 1;
7378 start_val = INTVAL (bl->initial_value);
7379 u_start_val = start_val;
7380
7381 neg_incr = 0, abs_incr = INTVAL (incr);
7382 if (INTVAL (incr) < 0)
7383 neg_incr = 1, abs_incr = -abs_incr;
7384 total_incr = abs_incr * loop_info->n_iterations;
7385
7386 /* Check for host arithmetic overflow. */
7387 if (total_incr / loop_info->n_iterations == abs_incr)
7388 {
7389 unsigned HOST_WIDE_INT u_max;
7390 HOST_WIDE_INT s_max;
7391
7392 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7393 s_end_val = u_end_val;
7394 u_max = GET_MODE_MASK (biv_mode);
7395 s_max = u_max >> 1;
7396
7397 /* Check zero extension of biv ok. */
7398 if (start_val >= 0
7399 /* Check for host arithmetic overflow. */
7400 && (neg_incr
7401 ? u_end_val < u_start_val
7402 : u_end_val > u_start_val)
7403 /* Check for target arithmetic overflow. */
7404 && (neg_incr
7405 ? 1 /* taken care of with host overflow */
7406 : u_end_val <= u_max))
7407 {
7408 ze_ok = 1;
7409 }
7410
7411 /* Check sign extension of biv ok. */
7412 /* ??? While it is true that overflow with signed and pointer
7413 arithmetic is undefined, I fear too many programmers don't
7414 keep this fact in mind -- myself included on occasion.
7415 So leave the signed overflow optimizations alone. */
7416 if (start_val >= -s_max - 1
7417 /* Check for host arithmetic overflow. */
7418 && (neg_incr
7419 ? s_end_val < start_val
7420 : s_end_val > start_val)
7421 /* Check for target arithmetic overflow. */
7422 && (neg_incr
7423 ? s_end_val >= -s_max - 1
7424 : s_end_val <= s_max))
7425 {
7426 se_ok = 1;
7427 }
7428 }
7429 }
7430
7431 /* Invalidate givs that fail the tests. */
7432 for (v = bl->giv; v; v = v->next_iv)
7433 if (v->ext_dependent)
7434 {
7435 enum rtx_code code = GET_CODE (v->ext_dependent);
7436 int ok = 0;
7437
7438 switch (code)
7439 {
7440 case SIGN_EXTEND:
7441 ok = se_ok;
7442 break;
7443 case ZERO_EXTEND:
7444 ok = ze_ok;
7445 break;
7446
7447 case TRUNCATE:
7448 /* We don't know whether this value is being used as either
7449 signed or unsigned, so to safely truncate we must satisfy
7450 both. The initial check here verifies the BIV itself;
7451 once that is successful we may check its range wrt the
7452 derived GIV. */
7453 if (se_ok && ze_ok)
7454 {
7455 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7456 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7457
7458 /* We know from the above that both endpoints are nonnegative,
7459 and that there is no wrapping. Verify that both endpoints
7460 are within the (signed) range of the outer mode. */
7461 if (u_start_val <= max && u_end_val <= max)
7462 ok = 1;
7463 }
7464 break;
7465
7466 default:
7467 abort ();
7468 }
7469
7470 if (ok)
7471 {
7472 if (loop_dump_stream)
7473 {
7474 fprintf (loop_dump_stream,
7475 "Verified ext dependent giv at %d of reg %d\n",
7476 INSN_UID (v->insn), bl->regno);
7477 }
7478 }
7479 else
7480 {
7481 if (loop_dump_stream)
7482 {
7483 const char *why;
7484
7485 if (info_ok)
7486 why = "biv iteration values overflowed";
7487 else
7488 {
7489 if (incr == pc_rtx)
7490 incr = biv_total_increment (bl);
7491 if (incr == const1_rtx)
7492 why = "biv iteration info incomplete; incr by 1";
7493 else
7494 why = "biv iteration info incomplete";
7495 }
7496
7497 fprintf (loop_dump_stream,
7498 "Failed ext dependent giv at %d, %s\n",
7499 INSN_UID (v->insn), why);
7500 }
7501 v->ignore = 1;
7502 bl->all_reduced = 0;
7503 }
7504 }
7505 }
7506
7507 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7508
7509 rtx
7510 extend_value_for_giv (v, value)
7511 struct induction *v;
7512 rtx value;
7513 {
7514 rtx ext_dep = v->ext_dependent;
7515
7516 if (! ext_dep)
7517 return value;
7518
7519 /* Recall that check_ext_dependent_givs verified that the known bounds
7520 of a biv did not overflow or wrap with respect to the extension for
7521 the giv. Therefore, constants need no additional adjustment. */
7522 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7523 return value;
7524
7525 /* Otherwise, we must adjust the value to compensate for the
7526 differing modes of the biv and the giv. */
7527 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7528 }
7529 \f
7530 struct combine_givs_stats
7531 {
7532 int giv_number;
7533 int total_benefit;
7534 };
7535
7536 static int
7537 cmp_combine_givs_stats (xp, yp)
7538 const PTR xp;
7539 const PTR yp;
7540 {
7541 const struct combine_givs_stats * const x =
7542 (const struct combine_givs_stats *) xp;
7543 const struct combine_givs_stats * const y =
7544 (const struct combine_givs_stats *) yp;
7545 int d;
7546 d = y->total_benefit - x->total_benefit;
7547 /* Stabilize the sort. */
7548 if (!d)
7549 d = x->giv_number - y->giv_number;
7550 return d;
7551 }
7552
7553 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7554 any other. If so, point SAME to the giv combined with and set NEW_REG to
7555 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7556 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7557
7558 static void
7559 combine_givs (regs, bl)
7560 struct loop_regs *regs;
7561 struct iv_class *bl;
7562 {
7563 /* Additional benefit to add for being combined multiple times. */
7564 const int extra_benefit = 3;
7565
7566 struct induction *g1, *g2, **giv_array;
7567 int i, j, k, giv_count;
7568 struct combine_givs_stats *stats;
7569 rtx *can_combine;
7570
7571 /* Count givs, because bl->giv_count is incorrect here. */
7572 giv_count = 0;
7573 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7574 if (!g1->ignore)
7575 giv_count++;
7576
7577 giv_array
7578 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7579 i = 0;
7580 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7581 if (!g1->ignore)
7582 giv_array[i++] = g1;
7583
7584 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7585 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7586
7587 for (i = 0; i < giv_count; i++)
7588 {
7589 int this_benefit;
7590 rtx single_use;
7591
7592 g1 = giv_array[i];
7593 stats[i].giv_number = i;
7594
7595 /* If a DEST_REG GIV is used only once, do not allow it to combine
7596 with anything, for in doing so we will gain nothing that cannot
7597 be had by simply letting the GIV with which we would have combined
7598 to be reduced on its own. The lossage shows up in particular with
7599 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7600 be seen elsewhere as well. */
7601 if (g1->giv_type == DEST_REG
7602 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7603 && single_use != const0_rtx)
7604 continue;
7605
7606 this_benefit = g1->benefit;
7607 /* Add an additional weight for zero addends. */
7608 if (g1->no_const_addval)
7609 this_benefit += 1;
7610
7611 for (j = 0; j < giv_count; j++)
7612 {
7613 rtx this_combine;
7614
7615 g2 = giv_array[j];
7616 if (g1 != g2
7617 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7618 {
7619 can_combine[i * giv_count + j] = this_combine;
7620 this_benefit += g2->benefit + extra_benefit;
7621 }
7622 }
7623 stats[i].total_benefit = this_benefit;
7624 }
7625
7626 /* Iterate, combining until we can't. */
7627 restart:
7628 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7629
7630 if (loop_dump_stream)
7631 {
7632 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7633 for (k = 0; k < giv_count; k++)
7634 {
7635 g1 = giv_array[stats[k].giv_number];
7636 if (!g1->combined_with && !g1->same)
7637 fprintf (loop_dump_stream, " {%d, %d}",
7638 INSN_UID (giv_array[stats[k].giv_number]->insn),
7639 stats[k].total_benefit);
7640 }
7641 putc ('\n', loop_dump_stream);
7642 }
7643
7644 for (k = 0; k < giv_count; k++)
7645 {
7646 int g1_add_benefit = 0;
7647
7648 i = stats[k].giv_number;
7649 g1 = giv_array[i];
7650
7651 /* If it has already been combined, skip. */
7652 if (g1->combined_with || g1->same)
7653 continue;
7654
7655 for (j = 0; j < giv_count; j++)
7656 {
7657 g2 = giv_array[j];
7658 if (g1 != g2 && can_combine[i * giv_count + j]
7659 /* If it has already been combined, skip. */
7660 && ! g2->same && ! g2->combined_with)
7661 {
7662 int l;
7663
7664 g2->new_reg = can_combine[i * giv_count + j];
7665 g2->same = g1;
7666 /* For a DEST_ADDR giv, the destination may now be replaced by a mem
7667 expression instead of a register. This changes the costs considerably,
7668 so add the compensation. */
7669 if (g2->giv_type == DEST_ADDR)
7670 g2->benefit = (g2->benefit + reg_address_cost
7671 - address_cost (g2->new_reg,
7672 GET_MODE (g2->mem)));
7673 g1->combined_with++;
7674 g1->lifetime += g2->lifetime;
7675
7676 g1_add_benefit += g2->benefit;
7677
7678 /* ??? The new final_[bg]iv_value code does a much better job
7679 of finding replaceable giv's, and hence this code may no
7680 longer be necessary. */
7681 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7682 g1_add_benefit -= copy_cost;
7683
7684 /* To help optimize the next set of combinations, remove
7685 this giv from the benefits of other potential mates. */
7686 for (l = 0; l < giv_count; ++l)
7687 {
7688 int m = stats[l].giv_number;
7689 if (can_combine[m * giv_count + j])
7690 stats[l].total_benefit -= g2->benefit + extra_benefit;
7691 }
7692
7693 if (loop_dump_stream)
7694 fprintf (loop_dump_stream,
7695 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7696 INSN_UID (g2->insn), INSN_UID (g1->insn),
7697 g1->benefit, g1_add_benefit, g1->lifetime);
7698 }
7699 }
7700
7701 /* To help optimize the next set of combinations, remove
7702 this giv from the benefits of other potential mates. */
7703 if (g1->combined_with)
7704 {
7705 for (j = 0; j < giv_count; ++j)
7706 {
7707 int m = stats[j].giv_number;
7708 if (can_combine[m * giv_count + i])
7709 stats[j].total_benefit -= g1->benefit + extra_benefit;
7710 }
7711
7712 g1->benefit += g1_add_benefit;
7713
7714 /* We've finished with this giv, and everything it touched.
7715 Restart the combination so that proper weights for the
7716 rest of the givs are properly taken into account. */
7717 /* ??? Ideally we would compact the arrays at this point, so
7718 as to not cover old ground. But sanely compacting
7719 can_combine is tricky. */
7720 goto restart;
7721 }
7722 }
7723
7724 /* Clean up. */
7725 free (stats);
7726 free (can_combine);
7727 }
7728 \f
7729 /* Generate sequence for REG = B * M + A. */
7730
7731 static rtx
7732 gen_add_mult (b, m, a, reg)
7733 rtx b; /* initial value of basic induction variable */
7734 rtx m; /* multiplicative constant */
7735 rtx a; /* additive constant */
7736 rtx reg; /* destination register */
7737 {
7738 rtx seq;
7739 rtx result;
7740
7741 start_sequence ();
7742 /* Use unsigned arithmetic. */
7743 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7744 if (reg != result)
7745 emit_move_insn (reg, result);
7746 seq = get_insns ();
7747 end_sequence ();
7748
7749 return seq;
7750 }
7751
7752
7753 /* Update registers created in insn sequence SEQ. */
7754
7755 static void
7756 loop_regs_update (loop, seq)
7757 const struct loop *loop ATTRIBUTE_UNUSED;
7758 rtx seq;
7759 {
7760 rtx insn;
7761
7762 /* Update register info for alias analysis. */
7763
7764 if (seq == NULL_RTX)
7765 return;
7766
7767 if (INSN_P (seq))
7768 {
7769 insn = seq;
7770 while (insn != NULL_RTX)
7771 {
7772 rtx set = single_set (insn);
7773
7774 if (set && GET_CODE (SET_DEST (set)) == REG)
7775 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7776
7777 insn = NEXT_INSN (insn);
7778 }
7779 }
7780 else if (GET_CODE (seq) == SET
7781 && GET_CODE (SET_DEST (seq)) == REG)
7782 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7783 }
7784
7785
7786 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7787
7788 void
7789 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7790 const struct loop *loop;
7791 rtx b; /* initial value of basic induction variable */
7792 rtx m; /* multiplicative constant */
7793 rtx a; /* additive constant */
7794 rtx reg; /* destination register */
7795 basic_block before_bb;
7796 rtx before_insn;
7797 {
7798 rtx seq;
7799
7800 if (! before_insn)
7801 {
7802 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7803 return;
7804 }
7805
7806 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7807 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7808
7809 /* Increase the lifetime of any invariants moved further in code. */
7810 update_reg_last_use (a, before_insn);
7811 update_reg_last_use (b, before_insn);
7812 update_reg_last_use (m, before_insn);
7813
7814 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7815
7816 /* It is possible that the expansion created lots of new registers.
7817 Iterate over the sequence we just created and record them all. */
7818 loop_regs_update (loop, seq);
7819 }
7820
7821
7822 /* Emit insns after the end of the loop to set REG = B * M + A. */
7823
7824 void
7825 loop_iv_add_mult_sink (loop, b, m, a, reg)
7826 const struct loop *loop;
7827 rtx b; /* initial value of basic induction variable */
7828 rtx m; /* multiplicative constant */
7829 rtx a; /* additive constant */
7830 rtx reg; /* destination register */
7831 {
7832 rtx seq;
7833
7834 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7835 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7836
7837 /* Increase the lifetime of any invariants moved further in code.
7838 ??? Is this really necessary? */
7839 update_reg_last_use (a, loop->sink);
7840 update_reg_last_use (b, loop->sink);
7841 update_reg_last_use (m, loop->sink);
7842
7843 loop_insn_sink (loop, seq);
7844
7845 /* It is possible that the expansion created lots of new registers.
7846 Iterate over the sequence we just created and record them all. */
7847 loop_regs_update (loop, seq);
7848 }
7849
7850
7851 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7852
7853 void
7854 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7855 const struct loop *loop;
7856 rtx b; /* initial value of basic induction variable */
7857 rtx m; /* multiplicative constant */
7858 rtx a; /* additive constant */
7859 rtx reg; /* destination register */
7860 {
7861 rtx seq;
7862
7863 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7864 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7865
7866 loop_insn_hoist (loop, seq);
7867
7868 /* It is possible that the expansion created lots of new registers.
7869 Iterate over the sequence we just created and record them all. */
7870 loop_regs_update (loop, seq);
7871 }
7872
7873
7874
7875 /* Similar to gen_add_mult, but compute the cost rather than generating
7876 the sequence. */
7877
7878 static int
7879 iv_add_mult_cost (b, m, a, reg)
7880 rtx b; /* initial value of basic induction variable */
7881 rtx m; /* multiplicative constant */
7882 rtx a; /* additive constant */
7883 rtx reg; /* destination register */
7884 {
7885 int cost = 0;
7886 rtx last, result;
7887
7888 start_sequence ();
7889 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7890 if (reg != result)
7891 emit_move_insn (reg, result);
7892 last = get_last_insn ();
7893 while (last)
7894 {
7895 rtx t = single_set (last);
7896 if (t)
7897 cost += rtx_cost (SET_SRC (t), SET);
7898 last = PREV_INSN (last);
7899 }
7900 end_sequence ();
7901 return cost;
7902 }
7903 \f
7904 /* Test whether A * B can be computed without
7905 an actual multiply insn. Value is 1 if so.
7906
7907 ??? This function stinks because it generates a ton of wasted RTL
7908 ??? and as a result fragments GC memory to no end. There are other
7909 ??? places in the compiler which are invoked a lot and do the same
7910 ??? thing, generate wasted RTL just to see if something is possible. */
7911
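/* For instance (illustrative only), a multiplication by the constant 5
   typically expands to a shift and an add -- at most three insns and no
   MULT rtx -- so it is judged cheap below; an expansion that still
   contains a MULT, or that takes more than three insns, is not.  */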
7912 static int
7913 product_cheap_p (a, b)
7914 rtx a;
7915 rtx b;
7916 {
7917 rtx tmp;
7918 int win, n_insns;
7919
7920 /* If only one is constant, make it B. */
7921 if (GET_CODE (a) == CONST_INT)
7922 tmp = a, a = b, b = tmp;
7923
7924 /* If first constant, both constant, so don't need multiply. */
7925 if (GET_CODE (a) == CONST_INT)
7926 return 1;
7927
7928 /* If second not constant, neither is constant, so would need multiply. */
7929 if (GET_CODE (b) != CONST_INT)
7930 return 0;
7931
7932 /* One operand is constant, so we might not need a multiply insn. Generate
7933 the code for the multiply and see if a call, a multiply, or a long
7934 sequence of insns is generated. */
7935
7936 start_sequence ();
7937 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7938 tmp = get_insns ();
7939 end_sequence ();
7940
7941 win = 1;
7942 if (INSN_P (tmp))
7943 {
7944 n_insns = 0;
7945 while (tmp != NULL_RTX)
7946 {
7947 rtx next = NEXT_INSN (tmp);
7948
7949 if (++n_insns > 3
7950 || GET_CODE (tmp) != INSN
7951 || (GET_CODE (PATTERN (tmp)) == SET
7952 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7953 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7954 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7955 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7956 {
7957 win = 0;
7958 break;
7959 }
7960
7961 tmp = next;
7962 }
7963 }
7964 else if (GET_CODE (tmp) == SET
7965 && GET_CODE (SET_SRC (tmp)) == MULT)
7966 win = 0;
7967 else if (GET_CODE (tmp) == PARALLEL
7968 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7969 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7970 win = 0;
7971
7972 return win;
7973 }
7974 \f
7975 /* Check to see if loop can be terminated by a "decrement and branch until
7976 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7977 Also try reversing an increment loop to a decrement loop
7978 to see if the optimization can be performed.
7979 Value is nonzero if the optimization was performed. */
7980
7981 /* This is useful even if the architecture doesn't have such an insn,
7982 because it might change a loop which increments from 0 to n to a loop
7983 which decrements from n to 0. A loop that decrements to zero is usually
7984 faster than one that increments from zero. */
7985
7986 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7987 such as approx_final_value, biv_total_increment, loop_iterations, and
7988 final_[bg]iv_value. */
7989
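/* As a rough C-level illustration (not from any particular testcase), a
   loop such as

       for (i = 0; i < n; i++)
         sum += a[i];

   whose counter is used only for counting may be rewritten to count down
   from n to 0, so that the exit test becomes a comparison against zero
   (or a decrement-and-branch insn where the target has one).  */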
7990 static int
7991 check_dbra_loop (loop, insn_count)
7992 struct loop *loop;
7993 int insn_count;
7994 {
7995 struct loop_info *loop_info = LOOP_INFO (loop);
7996 struct loop_regs *regs = LOOP_REGS (loop);
7997 struct loop_ivs *ivs = LOOP_IVS (loop);
7998 struct iv_class *bl;
7999 rtx reg;
8000 rtx jump_label;
8001 rtx final_value;
8002 rtx start_value;
8003 rtx new_add_val;
8004 rtx comparison;
8005 rtx before_comparison;
8006 rtx p;
8007 rtx jump;
8008 rtx first_compare;
8009 int compare_and_branch;
8010 rtx loop_start = loop->start;
8011 rtx loop_end = loop->end;
8012
8013 /* If last insn is a conditional branch, and the insn before tests a
8014 register value, try to optimize it. Otherwise, we can't do anything. */
8015
8016 jump = PREV_INSN (loop_end);
8017 comparison = get_condition_for_loop (loop, jump);
8018 if (comparison == 0)
8019 return 0;
8020 if (!onlyjump_p (jump))
8021 return 0;
8022
8023 /* Try to compute whether the compare/branch at the loop end is one or
8024 two instructions. */
8025 get_condition (jump, &first_compare);
8026 if (first_compare == jump)
8027 compare_and_branch = 1;
8028 else if (first_compare == prev_nonnote_insn (jump))
8029 compare_and_branch = 2;
8030 else
8031 return 0;
8032
8033 {
8034 /* If more than one condition is present to control the loop, then
8035 do not proceed, as this function does not know how to rewrite
8036 loop tests with more than one condition.
8037
8038 Look backwards from the first insn in the last comparison
8039 sequence and see if we've got another comparison sequence. */
8040
8041 rtx jump1;
8042 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8043 if (GET_CODE (jump1) == JUMP_INSN)
8044 return 0;
8045 }
8046
8047 /* Check all of the bivs to see if the compare uses one of them.
8048 Skip biv's set more than once because we can't guarantee that
8049 it will be zero on the last iteration. Also skip if the biv is
8050 used between its update and the test insn. */
8051
8052 for (bl = ivs->list; bl; bl = bl->next)
8053 {
8054 if (bl->biv_count == 1
8055 && ! bl->biv->maybe_multiple
8056 && bl->biv->dest_reg == XEXP (comparison, 0)
8057 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8058 first_compare))
8059 break;
8060 }
8061
8062 if (! bl)
8063 return 0;
8064
8065 /* Look for the case where the basic induction variable is always
8066 nonnegative, and equals zero on the last iteration.
8067 In this case, add a reg_note REG_NONNEG, which allows the
8068 m68k DBRA instruction to be used. */
8069
8070 if (((GET_CODE (comparison) == GT
8071 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8072 && INTVAL (XEXP (comparison, 1)) == -1)
8073 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8074 && GET_CODE (bl->biv->add_val) == CONST_INT
8075 && INTVAL (bl->biv->add_val) < 0)
8076 {
8077 /* Initial value must be greater than 0, and
8078 init_val % -dec_value == 0 to ensure that it equals zero on
8079 the last iteration. */
8080
8081 if (GET_CODE (bl->initial_value) == CONST_INT
8082 && INTVAL (bl->initial_value) > 0
8083 && (INTVAL (bl->initial_value)
8084 % (-INTVAL (bl->biv->add_val))) == 0)
8085 {
8086 /* register always nonnegative, add REG_NOTE to branch */
8087 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8088 REG_NOTES (jump)
8089 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8090 REG_NOTES (jump));
8091 bl->nonneg = 1;
8092
8093 return 1;
8094 }
8095
8096 /* If the decrement is 1 and the value was tested as >= 0 before
8097 the loop, then we can safely optimize. */
8098 for (p = loop_start; p; p = PREV_INSN (p))
8099 {
8100 if (GET_CODE (p) == CODE_LABEL)
8101 break;
8102 if (GET_CODE (p) != JUMP_INSN)
8103 continue;
8104
8105 before_comparison = get_condition_for_loop (loop, p);
8106 if (before_comparison
8107 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8108 && GET_CODE (before_comparison) == LT
8109 && XEXP (before_comparison, 1) == const0_rtx
8110 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8111 && INTVAL (bl->biv->add_val) == -1)
8112 {
8113 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8114 REG_NOTES (jump)
8115 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8116 REG_NOTES (jump));
8117 bl->nonneg = 1;
8118
8119 return 1;
8120 }
8121 }
8122 }
8123 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8124 && INTVAL (bl->biv->add_val) > 0)
8125 {
8126 /* Try to change inc to dec, so we can apply the above optimization. */
8127 /* Can do this if:
8128 all registers modified are induction variables or invariant,
8129 all memory references have non-overlapping addresses
8130 (obviously true if only one write)
8131 allow 2 insns for the compare/jump at the end of the loop. */
8132 /* Also, we must avoid any instructions which use both the reversed
8133 biv and another biv. Such instructions will fail if the loop is
8134 reversed. We meet this condition by requiring that either
8135 no_use_except_counting is true, or else that there is only
8136 one biv. */
8137 int num_nonfixed_reads = 0;
8138 /* 1 if the iteration var is used only to count iterations. */
8139 int no_use_except_counting = 0;
8140 /* 1 if the loop has no memory store, or it has a single memory store
8141 which is reversible. */
8142 int reversible_mem_store = 1;
8143
8144 if (bl->giv_count == 0
8145 && !loop->exit_count
8146 && !loop_info->has_multiple_exit_targets)
8147 {
8148 rtx bivreg = regno_reg_rtx[bl->regno];
8149 struct iv_class *blt;
8150
8151 /* If there are no givs for this biv, and the only exit is the
8152 fall through at the end of the loop, then
8153 see if perhaps there are no uses except to count. */
8154 no_use_except_counting = 1;
8155 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8156 if (INSN_P (p))
8157 {
8158 rtx set = single_set (p);
8159
8160 if (set && GET_CODE (SET_DEST (set)) == REG
8161 && REGNO (SET_DEST (set)) == bl->regno)
8162 /* An insn that sets the biv is okay. */
8163 ;
8164 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8165 || p == prev_nonnote_insn (loop_end))
8166 && reg_mentioned_p (bivreg, PATTERN (p)))
8167 {
8168 /* If either of these insns uses the biv and sets a pseudo
8169 that has more than one usage, then the biv has uses
8170 other than counting since it's used to derive a value
8171 that is used more than one time. */
8172 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8173 regs);
8174 if (regs->multiple_uses)
8175 {
8176 no_use_except_counting = 0;
8177 break;
8178 }
8179 }
8180 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8181 {
8182 no_use_except_counting = 0;
8183 break;
8184 }
8185 }
8186
8187 /* A biv has uses besides counting if it is used to set
8188 another biv. */
8189 for (blt = ivs->list; blt; blt = blt->next)
8190 if (blt->init_set
8191 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8192 {
8193 no_use_except_counting = 0;
8194 break;
8195 }
8196 }
8197
8198 if (no_use_except_counting)
8199 /* No need to worry about MEMs. */
8200 ;
8201 else if (loop_info->num_mem_sets <= 1)
8202 {
8203 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8204 if (INSN_P (p))
8205 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8206
8207 /* If the loop has a single store, and the destination address is
8208 invariant, then we can't reverse the loop, because this address
8209 might then have the wrong value at loop exit.
8210 This would work if the source were invariant also; however, in that
8211 case, the insn should have been moved out of the loop. */
8212
8213 if (loop_info->num_mem_sets == 1)
8214 {
8215 struct induction *v;
8216
8217 /* If we could prove that each of the memory locations
8218 written to was different, then we could reverse the
8219 store -- but we don't presently have any way of
8220 knowing that. */
8221 reversible_mem_store = 0;
8222
8223 /* If the store depends on a register that is set after the
8224 store, it depends on the initial value, and is thus not
8225 reversible. */
8226 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8227 {
8228 if (v->giv_type == DEST_REG
8229 && reg_mentioned_p (v->dest_reg,
8230 PATTERN (loop_info->first_loop_store_insn))
8231 && loop_insn_first_p (loop_info->first_loop_store_insn,
8232 v->insn))
8233 reversible_mem_store = 0;
8234 }
8235 }
8236 }
8237 else
8238 return 0;
8239
8240 /* This code only acts for innermost loops. Also it simplifies
8241 the memory address check by only reversing loops with
8242 zero or one memory access.
8243 Two memory accesses could involve parts of the same array,
8244 and that can't be reversed.
8245 If the biv is used only for counting, then we don't need to worry
8246 about all these things. */
8247
8248 if ((num_nonfixed_reads <= 1
8249 && ! loop_info->has_nonconst_call
8250 && ! loop_info->has_prefetch
8251 && ! loop_info->has_volatile
8252 && reversible_mem_store
8253 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8254 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8255 && (bl == ivs->list && bl->next == 0))
8256 || (no_use_except_counting && ! loop_info->has_prefetch))
8257 {
8258 rtx tem;
8259
8260 /* Loop can be reversed. */
8261 if (loop_dump_stream)
8262 fprintf (loop_dump_stream, "Can reverse loop\n");
8263
8264 /* Now check other conditions:
8265
8266 The increment must be a constant, as must the initial value,
8267 and the comparison code must be LT.
8268
8269 This test can probably be improved since +/- 1 in the constant
8270 can be obtained by changing LT to LE and vice versa; this is
8271 confusing. */
8272
8273 if (comparison
8274 /* for constants, LE gets turned into LT */
8275 && (GET_CODE (comparison) == LT
8276 || (GET_CODE (comparison) == LE
8277 && no_use_except_counting)))
8278 {
8279 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8280 rtx initial_value, comparison_value;
8281 int nonneg = 0;
8282 enum rtx_code cmp_code;
8283 int comparison_const_width;
8284 unsigned HOST_WIDE_INT comparison_sign_mask;
8285
8286 add_val = INTVAL (bl->biv->add_val);
8287 comparison_value = XEXP (comparison, 1);
8288 if (GET_MODE (comparison_value) == VOIDmode)
8289 comparison_const_width
8290 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8291 else
8292 comparison_const_width
8293 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8294 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8295 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8296 comparison_sign_mask
8297 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8298
8299 /* If the comparison value is not a loop invariant, then we
8300 can not reverse this loop.
8301
8302 ??? If the insns which initialize the comparison value as
8303 a whole compute an invariant result, then we could move
8304 them out of the loop and proceed with loop reversal. */
8305 if (! loop_invariant_p (loop, comparison_value))
8306 return 0;
8307
8308 if (GET_CODE (comparison_value) == CONST_INT)
8309 comparison_val = INTVAL (comparison_value);
8310 initial_value = bl->initial_value;
8311
8312 /* Normalize the initial value if it is an integer and
8313 has no other use except as a counter. This will allow
8314 a few more loops to be reversed. */
8315 if (no_use_except_counting
8316 && GET_CODE (comparison_value) == CONST_INT
8317 && GET_CODE (initial_value) == CONST_INT)
8318 {
8319 comparison_val = comparison_val - INTVAL (bl->initial_value);
8320 /* The code below requires comparison_val to be a multiple
8321 of add_val in order to do the loop reversal, so
8322 round up comparison_val to a multiple of add_val.
8323 Since comparison_value is constant, we know that the
8324 current comparison code is LT. */
8325 comparison_val = comparison_val + add_val - 1;
8326 comparison_val
8327 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8328 /* We postpone overflow checks for COMPARISON_VAL here;
8329 even if there is an overflow, we might still be able to
8330 reverse the loop, if converting the loop exit test to
8331 NE is possible. */
8332 initial_value = const0_rtx;
8333 }
8334
8335 /* First check if we can do a vanilla loop reversal. */
8336 if (initial_value == const0_rtx
8337 /* If we have a decrement_and_branch_on_count,
8338 prefer the NE test, since this will allow that
8339 instruction to be generated. Note that we must
8340 use a vanilla loop reversal if the biv is used to
8341 calculate a giv or has a non-counting use. */
8342 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8343 && defined (HAVE_decrement_and_branch_on_count)
8344 && (! (add_val == 1 && loop->vtop
8345 && (bl->biv_count == 0
8346 || no_use_except_counting)))
8347 #endif
8348 && GET_CODE (comparison_value) == CONST_INT
8349 /* Now do postponed overflow checks on COMPARISON_VAL. */
8350 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8351 & comparison_sign_mask))
8352 {
8353 /* Register will always be nonnegative, with value
8354 0 on last iteration */
8355 add_adjust = add_val;
8356 nonneg = 1;
8357 cmp_code = GE;
8358 }
8359 else if (add_val == 1 && loop->vtop
8360 && (bl->biv_count == 0
8361 || no_use_except_counting))
8362 {
8363 add_adjust = 0;
8364 cmp_code = NE;
8365 }
8366 else
8367 return 0;
8368
8369 if (GET_CODE (comparison) == LE)
8370 add_adjust -= add_val;
8371
8372 /* If the initial value is not zero, or if the comparison
8373 value is not an exact multiple of the increment, then we
8374 can not reverse this loop. */
8375 if (initial_value == const0_rtx
8376 && GET_CODE (comparison_value) == CONST_INT)
8377 {
8378 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8379 return 0;
8380 }
8381 else
8382 {
8383 if (! no_use_except_counting || add_val != 1)
8384 return 0;
8385 }
8386
8387 final_value = comparison_value;
8388
8389 /* Reset these in case we normalized the initial value
8390 and comparison value above. */
8391 if (GET_CODE (comparison_value) == CONST_INT
8392 && GET_CODE (initial_value) == CONST_INT)
8393 {
8394 comparison_value = GEN_INT (comparison_val);
8395 final_value
8396 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8397 }
8398 bl->initial_value = initial_value;
8399
8400 /* Save some info needed to produce the new insns. */
8401 reg = bl->biv->dest_reg;
8402 jump_label = condjump_label (PREV_INSN (loop_end));
8403 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8404
8405 /* Set start_value; if this is not a CONST_INT, we need
8406 to generate a SUB.
8407 Initialize biv to start_value before loop start.
8408 The old initializing insn will be deleted as a
8409 dead store by flow.c. */
8410 if (initial_value == const0_rtx
8411 && GET_CODE (comparison_value) == CONST_INT)
8412 {
8413 start_value = GEN_INT (comparison_val - add_adjust);
8414 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8415 }
8416 else if (GET_CODE (initial_value) == CONST_INT)
8417 {
8418 enum machine_mode mode = GET_MODE (reg);
8419 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8420 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8421
8422 if (add_insn == 0)
8423 return 0;
8424
8425 start_value
8426 = gen_rtx_PLUS (mode, comparison_value, offset);
8427 loop_insn_hoist (loop, add_insn);
8428 if (GET_CODE (comparison) == LE)
8429 final_value = gen_rtx_PLUS (mode, comparison_value,
8430 GEN_INT (add_val));
8431 }
8432 else if (! add_adjust)
8433 {
8434 enum machine_mode mode = GET_MODE (reg);
8435 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8436 initial_value);
8437
8438 if (sub_insn == 0)
8439 return 0;
8440 start_value
8441 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8442 loop_insn_hoist (loop, sub_insn);
8443 }
8444 else
8445 /* We could handle the other cases too, but it'll be
8446 better to have a testcase first. */
8447 return 0;
8448
8449 /* We may not have a single insn which can increment a reg, so
8450 create a sequence to hold all the insns from expand_inc. */
8451 start_sequence ();
8452 expand_inc (reg, new_add_val);
8453 tem = get_insns ();
8454 end_sequence ();
8455
8456 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8457 delete_insn (bl->biv->insn);
8458
8459 /* Update biv info to reflect its new status. */
8460 bl->biv->insn = p;
8461 bl->initial_value = start_value;
8462 bl->biv->add_val = new_add_val;
8463
8464 /* Update loop info. */
8465 loop_info->initial_value = reg;
8466 loop_info->initial_equiv_value = reg;
8467 loop_info->final_value = const0_rtx;
8468 loop_info->final_equiv_value = const0_rtx;
8469 loop_info->comparison_value = const0_rtx;
8470 loop_info->comparison_code = cmp_code;
8471 loop_info->increment = new_add_val;
8472
8473 /* Inc LABEL_NUSES so that delete_insn will
8474 not delete the label. */
8475 LABEL_NUSES (XEXP (jump_label, 0))++;
8476
8477 /* Emit an insn after the end of the loop to set the biv's
8478 proper exit value if it is used anywhere outside the loop. */
8479 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8480 || ! bl->init_insn
8481 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8482 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8483
8484 /* Delete compare/branch at end of loop. */
8485 delete_related_insns (PREV_INSN (loop_end));
8486 if (compare_and_branch == 2)
8487 delete_related_insns (first_compare);
8488
8489 /* Add new compare/branch insn at end of loop. */
8490 start_sequence ();
8491 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8492 GET_MODE (reg), 0,
8493 XEXP (jump_label, 0));
8494 tem = get_insns ();
8495 end_sequence ();
8496 emit_jump_insn_before (tem, loop_end);
8497
8498 for (tem = PREV_INSN (loop_end);
8499 tem && GET_CODE (tem) != JUMP_INSN;
8500 tem = PREV_INSN (tem))
8501 ;
8502
8503 if (tem)
8504 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8505
8506 if (nonneg)
8507 {
8508 if (tem)
8509 {
8510 /* Increment of LABEL_NUSES done above. */
8511 /* Register is now always nonnegative,
8512 so add REG_NONNEG note to the branch. */
8513 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8514 REG_NOTES (tem));
8515 }
8516 bl->nonneg = 1;
8517 }
8518
8519 /* No insn may reference both the reversed and another biv or it
8520 will fail (see comment near the top of the loop reversal
8521 code).
8522 Earlier on, we have verified that the biv has no use except
8523 counting, or it is the only biv in this function.
8524 However, the code that computes no_use_except_counting does
8525 not verify reg notes. It's possible to have an insn that
8526 references another biv, and has a REG_EQUAL note with an
8527 expression based on the reversed biv. To avoid this case,
8528 remove all REG_EQUAL notes based on the reversed biv
8529 here. */
8530 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8531 if (INSN_P (p))
8532 {
8533 rtx *pnote;
8534 rtx set = single_set (p);
8535 /* If this is a set of a GIV based on the reversed biv, any
8536 REG_EQUAL notes should still be correct. */
8537 if (! set
8538 || GET_CODE (SET_DEST (set)) != REG
8539 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8540 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8541 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8542 for (pnote = &REG_NOTES (p); *pnote;)
8543 {
8544 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8545 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8546 XEXP (*pnote, 0)))
8547 *pnote = XEXP (*pnote, 1);
8548 else
8549 pnote = &XEXP (*pnote, 1);
8550 }
8551 }
8552
8553 /* Mark that this biv has been reversed. Each giv which depends
8554 on this biv, and which is also live past the end of the loop
8555 will have to be fixed up. */
8556
8557 bl->reversed = 1;
8558
8559 if (loop_dump_stream)
8560 {
8561 fprintf (loop_dump_stream, "Reversed loop");
8562 if (bl->nonneg)
8563 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8564 else
8565 fprintf (loop_dump_stream, "\n");
8566 }
8567
8568 return 1;
8569 }
8570 }
8571 }
8572
8573 return 0;
8574 }
8575 \f
8576 /* Verify whether the biv BL appears to be eliminable,
8577 based on the insns in the loop that refer to it.
8578
8579 If ELIMINATE_P is non-zero, actually do the elimination.
8580
8581 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8582 determine whether invariant insns should be placed inside or at the
8583 start of the loop. */
8584
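/* As an illustrative example (not from any particular testcase), in

       for (i = 0; i < n; i++)
         p[i] = 0;

   once the address giv for p[i] has been strength-reduced, the only
   remaining use of the biv `i' is the exit test, which can be rewritten
   in terms of the reduced giv, making the biv itself eliminable.  */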
8585 static int
8586 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8587 const struct loop *loop;
8588 struct iv_class *bl;
8589 int eliminate_p;
8590 int threshold, insn_count;
8591 {
8592 struct loop_ivs *ivs = LOOP_IVS (loop);
8593 rtx reg = bl->biv->dest_reg;
8594 rtx p;
8595
8596 /* Scan all insns in the loop, stopping if we find one that uses the
8597 biv in a way that we cannot eliminate. */
8598
8599 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8600 {
8601 enum rtx_code code = GET_CODE (p);
8602 basic_block where_bb = 0;
8603 rtx where_insn = threshold >= insn_count ? 0 : p;
8604
8605 /* If this is a libcall that sets a giv, skip ahead to its end. */
8606 if (GET_RTX_CLASS (code) == 'i')
8607 {
8608 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8609
8610 if (note)
8611 {
8612 rtx last = XEXP (note, 0);
8613 rtx set = single_set (last);
8614
8615 if (set && GET_CODE (SET_DEST (set)) == REG)
8616 {
8617 unsigned int regno = REGNO (SET_DEST (set));
8618
8619 if (regno < ivs->n_regs
8620 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8621 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8622 p = last;
8623 }
8624 }
8625 }
8626 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8627 && reg_mentioned_p (reg, PATTERN (p))
8628 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8629 eliminate_p, where_bb, where_insn))
8630 {
8631 if (loop_dump_stream)
8632 fprintf (loop_dump_stream,
8633 "Cannot eliminate biv %d: biv used in insn %d.\n",
8634 bl->regno, INSN_UID (p));
8635 break;
8636 }
8637 }
8638
8639 if (p == loop->end)
8640 {
8641 if (loop_dump_stream)
8642 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8643 bl->regno, eliminate_p ? "was" : "can be");
8644 return 1;
8645 }
8646
8647 return 0;
8648 }
8649 \f
8650 /* INSN and REFERENCE are instructions in the same insn chain.
8651 Return non-zero if INSN is first. */
8652
8653 int
8654 loop_insn_first_p (insn, reference)
8655 rtx insn, reference;
8656 {
8657 rtx p, q;
8658
8659 for (p = insn, q = reference;;)
8660 {
8661 /* Start with test for not first so that INSN == REFERENCE yields not
8662 first. */
8663 if (q == insn || ! p)
8664 return 0;
8665 if (p == reference || ! q)
8666 return 1;
8667
8668 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8669 previous insn, hence the <= comparison below does not work if
8670 P is a note. */
8671 if (INSN_UID (p) < max_uid_for_loop
8672 && INSN_UID (q) < max_uid_for_loop
8673 && GET_CODE (p) != NOTE)
8674 return INSN_LUID (p) <= INSN_LUID (q);
8675
8676 if (INSN_UID (p) >= max_uid_for_loop
8677 || GET_CODE (p) == NOTE)
8678 p = NEXT_INSN (p);
8679 if (INSN_UID (q) >= max_uid_for_loop)
8680 q = NEXT_INSN (q);
8681 }
8682 }
8683
8684 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8685 the offset that we have to take into account due to auto-increment /
8686 giv derivation is zero. */
8687 static int
8688 biv_elimination_giv_has_0_offset (biv, giv, insn)
8689 struct induction *biv, *giv;
8690 rtx insn;
8691 {
8692 /* If the giv V had the auto-inc address optimization applied
8693 to it, and INSN occurs between the giv insn and the biv
8694 insn, then we'd have to adjust the value used here.
8695 This is rare, so we don't bother to make this possible. */
8696 if (giv->auto_inc_opt
8697 && ((loop_insn_first_p (giv->insn, insn)
8698 && loop_insn_first_p (insn, biv->insn))
8699 || (loop_insn_first_p (biv->insn, insn)
8700 && loop_insn_first_p (insn, giv->insn))))
8701 return 0;
8702
8703 return 1;
8704 }
8705
8706 /* If BL appears in X (part of the pattern of INSN), see if we can
8707 eliminate its use. If so, return 1. If not, return 0.
8708
8709 If BIV does not appear in X, return 1.
8710
8711 If ELIMINATE_P is non-zero, actually do the elimination.
8712 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8713 Depending on how many items have been moved out of the loop, it
8714 will either be before INSN (when WHERE_INSN is non-zero) or at the
8715 start of the loop (when WHERE_INSN is zero). */
8716
8717 static int
8718 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8719 const struct loop *loop;
8720 rtx x, insn;
8721 struct iv_class *bl;
8722 int eliminate_p;
8723 basic_block where_bb;
8724 rtx where_insn;
8725 {
8726 enum rtx_code code = GET_CODE (x);
8727 rtx reg = bl->biv->dest_reg;
8728 enum machine_mode mode = GET_MODE (reg);
8729 struct induction *v;
8730 rtx arg, tem;
8731 #ifdef HAVE_cc0
8732 rtx new;
8733 #endif
8734 int arg_operand;
8735 const char *fmt;
8736 int i, j;
8737
8738 switch (code)
8739 {
8740 case REG:
8741 /* If we haven't already been able to do something with this BIV,
8742 we can't eliminate it. */
8743 if (x == reg)
8744 return 0;
8745 return 1;
8746
8747 case SET:
8748 /* If this sets the BIV, it is not a problem. */
8749 if (SET_DEST (x) == reg)
8750 return 1;
8751
8752 /* If this is an insn that defines a giv, it is also ok because
8753 it will go away when the giv is reduced. */
8754 for (v = bl->giv; v; v = v->next_iv)
8755 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8756 return 1;
8757
8758 #ifdef HAVE_cc0
8759 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8760 {
8761 /* Can replace with any giv that was reduced and
8762 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8763 Require a constant for MULT_VAL, so we know it's nonzero.
8764 ??? We disable this optimization to avoid potential
8765 overflows. */
8766
8767 for (v = bl->giv; v; v = v->next_iv)
8768 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8769 && v->add_val == const0_rtx
8770 && ! v->ignore && ! v->maybe_dead && v->always_computable
8771 && v->mode == mode
8772 && 0)
8773 {
8774 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8775 continue;
8776
8777 if (! eliminate_p)
8778 return 1;
8779
8780 /* If the giv has the opposite direction of change,
8781 then reverse the comparison. */
8782 if (INTVAL (v->mult_val) < 0)
8783 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8784 const0_rtx, v->new_reg);
8785 else
8786 new = v->new_reg;
8787
8788 /* We can probably test that giv's reduced reg. */
8789 if (validate_change (insn, &SET_SRC (x), new, 0))
8790 return 1;
8791 }
8792
8793 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8794 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8795 Require a constant for MULT_VAL, so we know it's nonzero.
8796 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8797 overflow problem. */
8798
8799 for (v = bl->giv; v; v = v->next_iv)
8800 if (GET_CODE (v->mult_val) == CONST_INT
8801 && v->mult_val != const0_rtx
8802 && ! v->ignore && ! v->maybe_dead && v->always_computable
8803 && v->mode == mode
8804 && (GET_CODE (v->add_val) == SYMBOL_REF
8805 || GET_CODE (v->add_val) == LABEL_REF
8806 || GET_CODE (v->add_val) == CONST
8807 || (GET_CODE (v->add_val) == REG
8808 && REG_POINTER (v->add_val))))
8809 {
8810 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8811 continue;
8812
8813 if (! eliminate_p)
8814 return 1;
8815
8816 /* If the giv has the opposite direction of change,
8817 then reverse the comparison. */
8818 if (INTVAL (v->mult_val) < 0)
8819 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8820 v->new_reg);
8821 else
8822 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8823 copy_rtx (v->add_val));
8824
8825 /* Replace biv with the giv's reduced register. */
8826 update_reg_last_use (v->add_val, insn);
8827 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8828 return 1;
8829
8830 /* Insn doesn't support that constant or invariant. Copy it
8831 into a register (it will be a loop invariant.) */
8832 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8833
8834 loop_insn_emit_before (loop, 0, where_insn,
8835 gen_move_insn (tem,
8836 copy_rtx (v->add_val)));
8837
8838 /* Substitute the new register for its invariant value in
8839 the compare expression. */
8840 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8841 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8842 return 1;
8843 }
8844 }
8845 #endif
8846 break;
8847
8848 case COMPARE:
8849 case EQ: case NE:
8850 case GT: case GE: case GTU: case GEU:
8851 case LT: case LE: case LTU: case LEU:
8852 /* See if either argument is the biv. */
8853 if (XEXP (x, 0) == reg)
8854 arg = XEXP (x, 1), arg_operand = 1;
8855 else if (XEXP (x, 1) == reg)
8856 arg = XEXP (x, 0), arg_operand = 0;
8857 else
8858 break;
8859
8860 if (CONSTANT_P (arg))
8861 {
8862 /* First try to replace with any giv that has constant positive
8863 mult_val and constant add_val. We might be able to support
8864 negative mult_val, but it seems complex to do it in general. */
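	      /* Editorial illustration (hypothetical values): with a giv
	         reduced as G = 4*I + BASE (mult_val 4, add_val BASE), a test
	         such as (lt I N) against a constant N can be rewritten as
	         (lt G 4*N+BASE); the code below builds that right-hand value
	         with expand_mult_add or loop_iv_add_mult_emit_before, after
	         checking that the constant combination cannot overflow.  */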
8865
8866 for (v = bl->giv; v; v = v->next_iv)
8867 if (GET_CODE (v->mult_val) == CONST_INT
8868 && INTVAL (v->mult_val) > 0
8869 && (GET_CODE (v->add_val) == SYMBOL_REF
8870 || GET_CODE (v->add_val) == LABEL_REF
8871 || GET_CODE (v->add_val) == CONST
8872 || (GET_CODE (v->add_val) == REG
8873 && REG_POINTER (v->add_val)))
8874 && ! v->ignore && ! v->maybe_dead && v->always_computable
8875 && v->mode == mode)
8876 {
8877 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8878 continue;
8879
8880 /* Don't eliminate if the linear combination that makes up
8881 the giv overflows when it is applied to ARG. */
8882 if (GET_CODE (arg) == CONST_INT)
8883 {
8884 rtx add_val;
8885
8886 if (GET_CODE (v->add_val) == CONST_INT)
8887 add_val = v->add_val;
8888 else
8889 add_val = const0_rtx;
8890
8891 if (const_mult_add_overflow_p (arg, v->mult_val,
8892 add_val, mode, 1))
8893 continue;
8894 }
8895
8896 if (! eliminate_p)
8897 return 1;
8898
8899 /* Replace biv with the giv's reduced reg. */
8900 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8901
8902 /* If all constants are actually constant integers and
8903 the derived constant can be directly placed in the COMPARE,
8904 do so. */
8905 if (GET_CODE (arg) == CONST_INT
8906 && GET_CODE (v->add_val) == CONST_INT)
8907 {
8908 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8909 v->add_val, mode, 1);
8910 }
8911 else
8912 {
8913 /* Otherwise, load it into a register. */
8914 tem = gen_reg_rtx (mode);
8915 loop_iv_add_mult_emit_before (loop, arg,
8916 v->mult_val, v->add_val,
8917 tem, where_bb, where_insn);
8918 }
8919
8920 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8921
8922 if (apply_change_group ())
8923 return 1;
8924 }
8925
8926 /* Look for giv with positive constant mult_val and nonconst add_val.
8927 Insert insns to calculate new compare value.
8928 ??? Turn this off due to possible overflow. */
8929
8930 for (v = bl->giv; v; v = v->next_iv)
8931 if (GET_CODE (v->mult_val) == CONST_INT
8932 && INTVAL (v->mult_val) > 0
8933 && ! v->ignore && ! v->maybe_dead && v->always_computable
8934 && v->mode == mode
8935 && 0)
8936 {
8937 rtx tem;
8938
8939 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8940 continue;
8941
8942 if (! eliminate_p)
8943 return 1;
8944
8945 tem = gen_reg_rtx (mode);
8946
8947 /* Replace biv with giv's reduced register. */
8948 validate_change (insn, &XEXP (x, 1 - arg_operand),
8949 v->new_reg, 1);
8950
8951 /* Compute value to compare against. */
8952 loop_iv_add_mult_emit_before (loop, arg,
8953 v->mult_val, v->add_val,
8954 tem, where_bb, where_insn);
8955 /* Use it in this insn. */
8956 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8957 if (apply_change_group ())
8958 return 1;
8959 }
8960 }
8961 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8962 {
8963 if (loop_invariant_p (loop, arg) == 1)
8964 {
8965 /* Look for giv with constant positive mult_val and nonconst
8966 add_val. Insert insns to compute new compare value.
8967 ??? Turn this off due to possible overflow. */
8968
8969 for (v = bl->giv; v; v = v->next_iv)
8970 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8971 && ! v->ignore && ! v->maybe_dead && v->always_computable
8972 && v->mode == mode
8973 && 0)
8974 {
8975 rtx tem;
8976
8977 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8978 continue;
8979
8980 if (! eliminate_p)
8981 return 1;
8982
8983 tem = gen_reg_rtx (mode);
8984
8985 /* Replace biv with giv's reduced register. */
8986 validate_change (insn, &XEXP (x, 1 - arg_operand),
8987 v->new_reg, 1);
8988
8989 /* Compute value to compare against. */
8990 loop_iv_add_mult_emit_before (loop, arg,
8991 v->mult_val, v->add_val,
8992 tem, where_bb, where_insn);
8993 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8994 if (apply_change_group ())
8995 return 1;
8996 }
8997 }
8998
 8999 /* This code has problems. Basically, you can't know, when
 9000 deciding whether we will eliminate BL, whether a particular giv
 9001 of ARG will be reduced. If it isn't going to be reduced,
 9002 we can't eliminate BL. We can try forcing it to be reduced,
 9003 but that can generate poor code.
 9004
 9005 The problem is that the benefit of reducing TV, below, should
 9006 be increased if BL can actually be eliminated, but this means
 9007 we might have to do a topological sort of the order in which
 9008 we try to process bivs. It doesn't seem worthwhile to do
 9009 this sort of thing now. */
9010
9011 #if 0
9012 /* Otherwise the reg compared with had better be a biv. */
9013 if (GET_CODE (arg) != REG
9014 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9015 return 0;
9016
9017 /* Look for a pair of givs, one for each biv,
9018 with identical coefficients. */
9019 for (v = bl->giv; v; v = v->next_iv)
9020 {
9021 struct induction *tv;
9022
9023 if (v->ignore || v->maybe_dead || v->mode != mode)
9024 continue;
9025
9026 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9027 tv = tv->next_iv)
9028 if (! tv->ignore && ! tv->maybe_dead
9029 && rtx_equal_p (tv->mult_val, v->mult_val)
9030 && rtx_equal_p (tv->add_val, v->add_val)
9031 && tv->mode == mode)
9032 {
9033 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9034 continue;
9035
9036 if (! eliminate_p)
9037 return 1;
9038
9039 /* Replace biv with its giv's reduced reg. */
9040 XEXP (x, 1 - arg_operand) = v->new_reg;
9041 /* Replace other operand with the other giv's
9042 reduced reg. */
9043 XEXP (x, arg_operand) = tv->new_reg;
9044 return 1;
9045 }
9046 }
9047 #endif
9048 }
9049
9050 /* If we get here, the biv can't be eliminated. */
9051 return 0;
9052
9053 case MEM:
9054 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9055 biv is used in it, since it will be replaced. */
9056 for (v = bl->giv; v; v = v->next_iv)
9057 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9058 return 1;
9059 break;
9060
9061 default:
9062 break;
9063 }
9064
9065 /* See if any subexpression fails elimination. */
9066 fmt = GET_RTX_FORMAT (code);
9067 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9068 {
9069 switch (fmt[i])
9070 {
9071 case 'e':
9072 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9073 eliminate_p, where_bb, where_insn))
9074 return 0;
9075 break;
9076
9077 case 'E':
9078 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9079 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9080 eliminate_p, where_bb, where_insn))
9081 return 0;
9082 break;
9083 }
9084 }
9085
9086 return 1;
9087 }
9088 \f
9089 /* Return nonzero if the last use of REG
9090 is in an insn following INSN in the same basic block. */
9091
9092 static int
9093 last_use_this_basic_block (reg, insn)
9094 rtx reg;
9095 rtx insn;
9096 {
9097 rtx n;
9098 for (n = insn;
9099 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9100 n = NEXT_INSN (n))
9101 {
9102 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9103 return 1;
9104 }
9105 return 0;
9106 }
9107 \f
9108 /* Called via `note_stores' to record the initial value of a biv. Here we
9109 just record the location of the set and process it later. */
9110
9111 static void
9112 record_initial (dest, set, data)
9113 rtx dest;
9114 rtx set;
 9115 void *data;
9116 {
9117 struct loop_ivs *ivs = (struct loop_ivs *) data;
9118 struct iv_class *bl;
9119
9120 if (GET_CODE (dest) != REG
9121 || REGNO (dest) >= ivs->n_regs
9122 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9123 return;
9124
9125 bl = REG_IV_CLASS (ivs, REGNO (dest));
9126
9127 /* If this is the first set found, record it. */
9128 if (bl->init_insn == 0)
9129 {
9130 bl->init_insn = note_insn;
9131 bl->init_set = set;
9132 }
9133 }
9134 \f
9135 /* If any of the registers in X are "old" and currently have a last use earlier
9136 than INSN, update them to have a last use of INSN. Their actual last use
9137 will be the previous insn but it will not have a valid uid_luid so we can't
9138 use it. X must be a source expression only. */
9139
9140 static void
9141 update_reg_last_use (x, insn)
9142 rtx x;
9143 rtx insn;
9144 {
9145 /* Check for the case where INSN does not have a valid luid. In this case,
9146 there is no need to modify the regno_last_uid, as this can only happen
9147 when code is inserted after the loop_end to set a pseudo's final value,
9148 and hence this insn will never be the last use of x.
9149 ???? This comment is not correct. See for example loop_givs_reduce.
9150 This may insert an insn before another new insn. */
9151 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9152 && INSN_UID (insn) < max_uid_for_loop
9153 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9154 {
9155 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9156 }
9157 else
9158 {
9159 int i, j;
9160 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9161 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9162 {
9163 if (fmt[i] == 'e')
9164 update_reg_last_use (XEXP (x, i), insn);
9165 else if (fmt[i] == 'E')
9166 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9167 update_reg_last_use (XVECEXP (x, i, j), insn);
9168 }
9169 }
9170 }
9171 \f
9172 /* Given an insn INSN and condition COND, return the condition in a
9173 canonical form to simplify testing by callers. Specifically:
9174
9175 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9176 (2) Both operands will be machine operands; (cc0) will have been replaced.
9177 (3) If an operand is a constant, it will be the second operand.
9178 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9179 for GE, GEU, and LEU.
9180
9181 If the condition cannot be understood, or is an inequality floating-point
9182 comparison which needs to be reversed, 0 will be returned.
9183
 9184 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9185
9186 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9187 insn used in locating the condition was found. If a replacement test
9188 of the condition is desired, it should be placed in front of that
9189 insn and we will be sure that the inputs are still valid.
9190
9191 If WANT_REG is non-zero, we wish the condition to be relative to that
9192 register, if possible. Therefore, do not canonicalize the condition
9193 further. */
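 /* As an editorial illustration of rule (4) above: a condition
    (le (reg:SI 100) (const_int 3)) is returned as
    (lt (reg:SI 100) (const_int 4)), and (geu (reg:SI 100) (const_int 1))
    as (gtu (reg:SI 100) (const_int 0)), provided the adjusted constant
    still fits in the operand's mode.  */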
9194
9195 rtx
9196 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9197 rtx insn;
9198 rtx cond;
9199 int reverse;
9200 rtx *earliest;
9201 rtx want_reg;
9202 {
9203 enum rtx_code code;
9204 rtx prev = insn;
9205 rtx set;
9206 rtx tem;
9207 rtx op0, op1;
9208 int reverse_code = 0;
9209 enum machine_mode mode;
9210
9211 code = GET_CODE (cond);
9212 mode = GET_MODE (cond);
9213 op0 = XEXP (cond, 0);
9214 op1 = XEXP (cond, 1);
9215
9216 if (reverse)
9217 code = reversed_comparison_code (cond, insn);
9218 if (code == UNKNOWN)
9219 return 0;
9220
9221 if (earliest)
9222 *earliest = insn;
9223
9224 /* If we are comparing a register with zero, see if the register is set
9225 in the previous insn to a COMPARE or a comparison operation. Perform
9226 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9227 in cse.c */
9228
9229 while (GET_RTX_CLASS (code) == '<'
9230 && op1 == CONST0_RTX (GET_MODE (op0))
9231 && op0 != want_reg)
9232 {
9233 /* Set non-zero when we find something of interest. */
9234 rtx x = 0;
9235
9236 #ifdef HAVE_cc0
9237 /* If comparison with cc0, import actual comparison from compare
9238 insn. */
9239 if (op0 == cc0_rtx)
9240 {
9241 if ((prev = prev_nonnote_insn (prev)) == 0
9242 || GET_CODE (prev) != INSN
9243 || (set = single_set (prev)) == 0
9244 || SET_DEST (set) != cc0_rtx)
9245 return 0;
9246
9247 op0 = SET_SRC (set);
9248 op1 = CONST0_RTX (GET_MODE (op0));
9249 if (earliest)
9250 *earliest = prev;
9251 }
9252 #endif
9253
9254 /* If this is a COMPARE, pick up the two things being compared. */
9255 if (GET_CODE (op0) == COMPARE)
9256 {
9257 op1 = XEXP (op0, 1);
9258 op0 = XEXP (op0, 0);
9259 continue;
9260 }
9261 else if (GET_CODE (op0) != REG)
9262 break;
9263
9264 /* Go back to the previous insn. Stop if it is not an INSN. We also
9265 stop if it isn't a single set or if it has a REG_INC note because
9266 we don't want to bother dealing with it. */
9267
9268 if ((prev = prev_nonnote_insn (prev)) == 0
9269 || GET_CODE (prev) != INSN
9270 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9271 break;
9272
9273 set = set_of (op0, prev);
9274
9275 if (set
9276 && (GET_CODE (set) != SET
9277 || !rtx_equal_p (SET_DEST (set), op0)))
9278 break;
9279
9280 /* If this is setting OP0, get what it sets it to if it looks
9281 relevant. */
9282 if (set)
9283 {
9284 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9285
9286 /* ??? We may not combine comparisons done in a CCmode with
9287 comparisons not done in a CCmode. This is to aid targets
9288 like Alpha that have an IEEE compliant EQ instruction, and
9289 a non-IEEE compliant BEQ instruction. The use of CCmode is
9290 actually artificial, simply to prevent the combination, but
9291 should not affect other platforms.
9292
9293 However, we must allow VOIDmode comparisons to match either
9294 CCmode or non-CCmode comparison, because some ports have
9295 modeless comparisons inside branch patterns.
9296
9297 ??? This mode check should perhaps look more like the mode check
9298 in simplify_comparison in combine. */
9299
9300 if ((GET_CODE (SET_SRC (set)) == COMPARE
9301 || (((code == NE
9302 || (code == LT
9303 && GET_MODE_CLASS (inner_mode) == MODE_INT
9304 && (GET_MODE_BITSIZE (inner_mode)
9305 <= HOST_BITS_PER_WIDE_INT)
9306 && (STORE_FLAG_VALUE
9307 & ((HOST_WIDE_INT) 1
9308 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9309 #ifdef FLOAT_STORE_FLAG_VALUE
9310 || (code == LT
9311 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9312 && (REAL_VALUE_NEGATIVE
9313 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9314 #endif
9315 ))
9316 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9317 && (((GET_MODE_CLASS (mode) == MODE_CC)
9318 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9319 || mode == VOIDmode || inner_mode == VOIDmode))
9320 x = SET_SRC (set);
9321 else if (((code == EQ
9322 || (code == GE
9323 && (GET_MODE_BITSIZE (inner_mode)
9324 <= HOST_BITS_PER_WIDE_INT)
9325 && GET_MODE_CLASS (inner_mode) == MODE_INT
9326 && (STORE_FLAG_VALUE
9327 & ((HOST_WIDE_INT) 1
9328 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9329 #ifdef FLOAT_STORE_FLAG_VALUE
9330 || (code == GE
9331 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9332 && (REAL_VALUE_NEGATIVE
9333 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9334 #endif
9335 ))
9336 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9337 && (((GET_MODE_CLASS (mode) == MODE_CC)
9338 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9339 || mode == VOIDmode || inner_mode == VOIDmode))
9340
9341 {
9342 reverse_code = 1;
9343 x = SET_SRC (set);
9344 }
9345 else
9346 break;
9347 }
9348
9349 else if (reg_set_p (op0, prev))
9350 /* If this sets OP0, but not directly, we have to give up. */
9351 break;
9352
9353 if (x)
9354 {
9355 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9356 code = GET_CODE (x);
9357 if (reverse_code)
9358 {
9359 code = reversed_comparison_code (x, prev);
9360 if (code == UNKNOWN)
9361 return 0;
9362 reverse_code = 0;
9363 }
9364
9365 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9366 if (earliest)
9367 *earliest = prev;
9368 }
9369 }
9370
9371 /* If constant is first, put it last. */
9372 if (CONSTANT_P (op0))
9373 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9374
9375 /* If OP0 is the result of a comparison, we weren't able to find what
9376 was really being compared, so fail. */
9377 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9378 return 0;
9379
9380 /* Canonicalize any ordered comparison with integers involving equality
9381 if we can do computations in the relevant mode and we do not
9382 overflow. */
9383
9384 if (GET_CODE (op1) == CONST_INT
9385 && GET_MODE (op0) != VOIDmode
9386 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9387 {
9388 HOST_WIDE_INT const_val = INTVAL (op1);
9389 unsigned HOST_WIDE_INT uconst_val = const_val;
9390 unsigned HOST_WIDE_INT max_val
9391 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9392
9393 switch (code)
9394 {
9395 case LE:
9396 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9397 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9398 break;
9399
9400 /* When cross-compiling, const_val might be sign-extended from
 9401 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9402 case GE:
9403 if ((HOST_WIDE_INT) (const_val & max_val)
9404 != (((HOST_WIDE_INT) 1
9405 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9406 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9407 break;
9408
9409 case LEU:
9410 if (uconst_val < max_val)
9411 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9412 break;
9413
9414 case GEU:
9415 if (uconst_val != 0)
9416 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9417 break;
9418
9419 default:
9420 break;
9421 }
9422 }
9423
9424 #ifdef HAVE_cc0
9425 /* Never return CC0; return zero instead. */
9426 if (op0 == cc0_rtx)
9427 return 0;
9428 #endif
9429
9430 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9431 }
9432
9433 /* Given a jump insn JUMP, return the condition that will cause it to branch
9434 to its JUMP_LABEL. If the condition cannot be understood, or is an
9435 inequality floating-point comparison which needs to be reversed, 0 will
9436 be returned.
9437
9438 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9439 insn used in locating the condition was found. If a replacement test
9440 of the condition is desired, it should be placed in front of that
9441 insn and we will be sure that the inputs are still valid. */
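 /* Editorial example: for a jump whose pattern is
       (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                               (label_ref L) (pc)))
    this returns the canonicalized (ne (reg 100) (const_int 0)); if the
    label_ref instead appears in the else arm, the condition is reversed
    before being canonicalized.  */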
9442
9443 rtx
9444 get_condition (jump, earliest)
9445 rtx jump;
9446 rtx *earliest;
9447 {
9448 rtx cond;
9449 int reverse;
9450 rtx set;
9451
9452 /* If this is not a standard conditional jump, we can't parse it. */
9453 if (GET_CODE (jump) != JUMP_INSN
9454 || ! any_condjump_p (jump))
9455 return 0;
9456 set = pc_set (jump);
9457
9458 cond = XEXP (SET_SRC (set), 0);
9459
9460 /* If this branches to JUMP_LABEL when the condition is false, reverse
9461 the condition. */
9462 reverse
9463 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9464 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9465
9466 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9467 }
9468
9469 /* Similar to above routine, except that we also put an invariant last
9470 unless both operands are invariants. */
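 /* Editorial example: if N is loop-invariant but I is not, a comparison
    (gt N I) is returned as (lt I N) so that the invariant operand comes
    last.  */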
9471
9472 rtx
9473 get_condition_for_loop (loop, x)
9474 const struct loop *loop;
9475 rtx x;
9476 {
9477 rtx comparison = get_condition (x, (rtx*) 0);
9478
9479 if (comparison == 0
9480 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9481 || loop_invariant_p (loop, XEXP (comparison, 1)))
9482 return comparison;
9483
9484 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9485 XEXP (comparison, 1), XEXP (comparison, 0));
9486 }
9487
9488 /* Scan the function and determine whether it has indirect (computed) jumps.
9489
9490 This is taken mostly from flow.c; similar code exists elsewhere
9491 in the compiler. It may be useful to put this into rtlanal.c. */
9492 static int
9493 indirect_jump_in_function_p (start)
9494 rtx start;
9495 {
9496 rtx insn;
9497
9498 for (insn = start; insn; insn = NEXT_INSN (insn))
9499 if (computed_jump_p (insn))
9500 return 1;
9501
9502 return 0;
9503 }
9504
9505 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9506 documentation for LOOP_MEMS for the definition of `appropriate'.
9507 This function is called from prescan_loop via for_each_rtx. */
9508
9509 static int
9510 insert_loop_mem (mem, data)
9511 rtx *mem;
 9512 void *data;
9513 {
9514 struct loop_info *loop_info = data;
9515 int i;
9516 rtx m = *mem;
9517
9518 if (m == NULL_RTX)
9519 return 0;
9520
9521 switch (GET_CODE (m))
9522 {
9523 case MEM:
9524 break;
9525
9526 case CLOBBER:
9527 /* We're not interested in MEMs that are only clobbered. */
9528 return -1;
9529
9530 case CONST_DOUBLE:
9531 /* We're not interested in the MEM associated with a
9532 CONST_DOUBLE, so there's no need to traverse into this. */
9533 return -1;
9534
9535 case EXPR_LIST:
9536 /* We're not interested in any MEMs that only appear in notes. */
9537 return -1;
9538
9539 default:
9540 /* This is not a MEM. */
9541 return 0;
9542 }
9543
9544 /* See if we've already seen this MEM. */
9545 for (i = 0; i < loop_info->mems_idx; ++i)
9546 if (rtx_equal_p (m, loop_info->mems[i].mem))
9547 {
9548 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9549 /* The modes of the two memory accesses are different. If
9550 this happens, something tricky is going on, and we just
9551 don't optimize accesses to this MEM. */
9552 loop_info->mems[i].optimize = 0;
9553
9554 return 0;
9555 }
9556
9557 /* Resize the array, if necessary. */
9558 if (loop_info->mems_idx == loop_info->mems_allocated)
9559 {
9560 if (loop_info->mems_allocated != 0)
9561 loop_info->mems_allocated *= 2;
9562 else
9563 loop_info->mems_allocated = 32;
9564
9565 loop_info->mems = (loop_mem_info *)
9566 xrealloc (loop_info->mems,
9567 loop_info->mems_allocated * sizeof (loop_mem_info));
9568 }
9569
9570 /* Actually insert the MEM. */
9571 loop_info->mems[loop_info->mems_idx].mem = m;
9572 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9573 because we can't put it in a register. We still store it in the
9574 table, though, so that if we see the same address later, but in a
9575 non-BLK mode, we'll not think we can optimize it at that point. */
9576 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9577 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9578 ++loop_info->mems_idx;
9579
9580 return 0;
9581 }
9582
9583
9584 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9585
9586 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9587 register that is modified by an insn between FROM and TO. If the
9588 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9589 more, stop incrementing it, to avoid overflow.
9590
9591 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9592 register I is used, if it is only used once. Otherwise, it is set
9593 to 0 (for no uses) or const0_rtx for more than one use. This
9594 parameter may be zero, in which case this processing is not done.
9595
9596 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9597 optimize register I. */
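 /* Editorial illustration of the conventions above: a register set many
    hundreds of times inside the loop still ends up with SET_IN_LOOP capped
    at 127 (the counter saturates), while SINGLE_USAGE holds the using insn
    for a register referenced exactly once, 0 for a register never used,
    and const0_rtx for one used in two or more insns.  */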
9598
9599 static void
9600 loop_regs_scan (loop, extra_size)
9601 const struct loop *loop;
9602 int extra_size;
9603 {
9604 struct loop_regs *regs = LOOP_REGS (loop);
9605 int old_nregs;
9606 /* last_set[n] is nonzero iff reg n has been set in the current
9607 basic block. In that case, it is the insn that last set reg n. */
9608 rtx *last_set;
9609 rtx insn;
9610 int i;
9611
9612 old_nregs = regs->num;
9613 regs->num = max_reg_num ();
9614
9615 /* Grow the regs array if not allocated or too small. */
9616 if (regs->num >= regs->size)
9617 {
9618 regs->size = regs->num + extra_size;
9619
9620 regs->array = (struct loop_reg *)
9621 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9622
9623 /* Zero the new elements. */
9624 memset (regs->array + old_nregs, 0,
9625 (regs->size - old_nregs) * sizeof (*regs->array));
9626 }
9627
9628 /* Clear previously scanned fields but do not clear n_times_set. */
9629 for (i = 0; i < old_nregs; i++)
9630 {
9631 regs->array[i].set_in_loop = 0;
9632 regs->array[i].may_not_optimize = 0;
9633 regs->array[i].single_usage = NULL_RTX;
9634 }
9635
9636 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9637
9638 /* Scan the loop, recording register usage. */
9639 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9640 insn = NEXT_INSN (insn))
9641 {
9642 if (INSN_P (insn))
9643 {
9644 /* Record registers that have exactly one use. */
9645 find_single_use_in_loop (regs, insn, PATTERN (insn));
9646
9647 /* Include uses in REG_EQUAL notes. */
9648 if (REG_NOTES (insn))
9649 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9650
9651 if (GET_CODE (PATTERN (insn)) == SET
9652 || GET_CODE (PATTERN (insn)) == CLOBBER)
9653 count_one_set (regs, insn, PATTERN (insn), last_set);
9654 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9655 {
9656 int i;
9657 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9658 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9659 last_set);
9660 }
9661 }
9662
9663 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9664 memset (last_set, 0, regs->num * sizeof (rtx));
9665 }
9666
9667 /* Invalidate all hard registers clobbered by calls. With one exception:
9668 a call-clobbered PIC register is still function-invariant for our
9669 purposes, since we can hoist any PIC calculations out of the loop.
9670 Thus the call to rtx_varies_p. */
9671 if (LOOP_INFO (loop)->has_call)
9672 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9673 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9674 && rtx_varies_p (regno_reg_rtx[i], 1))
9675 {
9676 regs->array[i].may_not_optimize = 1;
9677 regs->array[i].set_in_loop = 1;
9678 }
9679
9680 #ifdef AVOID_CCMODE_COPIES
9681 /* Don't try to move insns which set CC registers if we should not
9682 create CCmode register copies. */
9683 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9684 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9685 regs->array[i].may_not_optimize = 1;
9686 #endif
9687
9688 /* Set regs->array[I].n_times_set for the new registers. */
9689 for (i = old_nregs; i < regs->num; i++)
9690 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9691
9692 free (last_set);
9693 }
9694
9695 /* Returns the number of real INSNs in the LOOP. */
9696
9697 static int
9698 count_insns_in_loop (loop)
9699 const struct loop *loop;
9700 {
9701 int count = 0;
9702 rtx insn;
9703
9704 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9705 insn = NEXT_INSN (insn))
9706 if (INSN_P (insn))
9707 ++count;
9708
9709 return count;
9710 }
9711
9712 /* Move MEMs into registers for the duration of the loop. */
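 /* Editorial sketch of the transformation below (the pseudo register and
    the exact insns emitted are illustrative): for a MEM judged safe to
    optimize, a pseudo is allocated and
        (set (reg PSEUDO) <MEM or a cheaper constant/register equivalent>)
    is emitted in the loop pre-header, every reference to the MEM inside
    the loop is replaced by (reg PSEUDO), and, only if the MEM is written
    within the loop,
        (set <MEM> (reg PSEUDO))
    is emitted after the loop end.  */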
9713
9714 static void
9715 load_mems (loop)
9716 const struct loop *loop;
9717 {
9718 struct loop_info *loop_info = LOOP_INFO (loop);
9719 struct loop_regs *regs = LOOP_REGS (loop);
9720 int maybe_never = 0;
9721 int i;
9722 rtx p, prev_ebb_head;
9723 rtx label = NULL_RTX;
9724 rtx end_label;
9725 /* Nonzero if the next instruction may never be executed. */
9726 int next_maybe_never = 0;
9727 unsigned int last_max_reg = max_reg_num ();
9728
9729 if (loop_info->mems_idx == 0)
9730 return;
9731
9732 /* We cannot use next_label here because it skips over normal insns. */
9733 end_label = next_nonnote_insn (loop->end);
9734 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9735 end_label = NULL_RTX;
9736
9737 /* Check to see if it's possible that some instructions in the loop are
9738 never executed. Also check if there is a goto out of the loop other
9739 than right after the end of the loop. */
9740 for (p = next_insn_in_loop (loop, loop->scan_start);
9741 p != NULL_RTX;
9742 p = next_insn_in_loop (loop, p))
9743 {
9744 if (GET_CODE (p) == CODE_LABEL)
9745 maybe_never = 1;
9746 else if (GET_CODE (p) == JUMP_INSN
9747 /* If we enter the loop in the middle, and scan
9748 around to the beginning, don't set maybe_never
9749 for that. This must be an unconditional jump,
9750 otherwise the code at the top of the loop might
9751 never be executed. Unconditional jumps are
 9752 followed by a barrier and then the loop end. */
9753 && ! (GET_CODE (p) == JUMP_INSN
9754 && JUMP_LABEL (p) == loop->top
9755 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9756 && any_uncondjump_p (p)))
9757 {
9758 /* If this is a jump outside of the loop but not right
9759 after the end of the loop, we would have to emit new fixup
9760 sequences for each such label. */
9761 if (/* If we can't tell where control might go when this
9762 JUMP_INSN is executed, we must be conservative. */
9763 !JUMP_LABEL (p)
9764 || (JUMP_LABEL (p) != end_label
9765 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9766 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9767 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9768 return;
9769
9770 if (!any_condjump_p (p))
9771 /* Something complicated. */
9772 maybe_never = 1;
9773 else
9774 /* If there are any more instructions in the loop, they
9775 might not be reached. */
9776 next_maybe_never = 1;
9777 }
9778 else if (next_maybe_never)
9779 maybe_never = 1;
9780 }
9781
9782 /* Find start of the extended basic block that enters the loop. */
9783 for (p = loop->start;
9784 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9785 p = PREV_INSN (p))
9786 ;
9787 prev_ebb_head = p;
9788
9789 cselib_init ();
9790
9791 /* Build table of mems that get set to constant values before the
9792 loop. */
9793 for (; p != loop->start; p = NEXT_INSN (p))
9794 cselib_process_insn (p);
9795
9796 /* Actually move the MEMs. */
9797 for (i = 0; i < loop_info->mems_idx; ++i)
9798 {
9799 regset_head load_copies;
9800 regset_head store_copies;
9801 int written = 0;
9802 rtx reg;
9803 rtx mem = loop_info->mems[i].mem;
9804 rtx mem_list_entry;
9805
9806 if (MEM_VOLATILE_P (mem)
9807 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9808 /* There's no telling whether or not MEM is modified. */
9809 loop_info->mems[i].optimize = 0;
9810
9811 /* Go through the MEMs written to in the loop to see if this
9812 one is aliased by one of them. */
9813 mem_list_entry = loop_info->store_mems;
9814 while (mem_list_entry)
9815 {
9816 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9817 written = 1;
9818 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9819 mem, rtx_varies_p))
9820 {
9821 /* MEM is indeed aliased by this store. */
9822 loop_info->mems[i].optimize = 0;
9823 break;
9824 }
9825 mem_list_entry = XEXP (mem_list_entry, 1);
9826 }
9827
9828 if (flag_float_store && written
9829 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9830 loop_info->mems[i].optimize = 0;
9831
9832 /* If this MEM is written to, we must be sure that there
9833 are no reads from another MEM that aliases this one. */
9834 if (loop_info->mems[i].optimize && written)
9835 {
9836 int j;
9837
9838 for (j = 0; j < loop_info->mems_idx; ++j)
9839 {
9840 if (j == i)
9841 continue;
9842 else if (true_dependence (mem,
9843 VOIDmode,
9844 loop_info->mems[j].mem,
9845 rtx_varies_p))
9846 {
9847 /* It's not safe to hoist loop_info->mems[i] out of
9848 the loop because writes to it might not be
9849 seen by reads from loop_info->mems[j]. */
9850 loop_info->mems[i].optimize = 0;
9851 break;
9852 }
9853 }
9854 }
9855
9856 if (maybe_never && may_trap_p (mem))
9857 /* We can't access the MEM outside the loop; it might
9858 cause a trap that wouldn't have happened otherwise. */
9859 loop_info->mems[i].optimize = 0;
9860
9861 if (!loop_info->mems[i].optimize)
9862 /* We thought we were going to lift this MEM out of the
9863 loop, but later discovered that we could not. */
9864 continue;
9865
9866 INIT_REG_SET (&load_copies);
9867 INIT_REG_SET (&store_copies);
9868
9869 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9870 order to keep scan_loop from moving stores to this MEM
9871 out of the loop just because this REG is neither a
9872 user-variable nor used in the loop test. */
9873 reg = gen_reg_rtx (GET_MODE (mem));
9874 REG_USERVAR_P (reg) = 1;
9875 loop_info->mems[i].reg = reg;
9876
9877 /* Now, replace all references to the MEM with the
9878 corresponding pseudos. */
9879 maybe_never = 0;
9880 for (p = next_insn_in_loop (loop, loop->scan_start);
9881 p != NULL_RTX;
9882 p = next_insn_in_loop (loop, p))
9883 {
9884 if (INSN_P (p))
9885 {
9886 rtx set;
9887
9888 set = single_set (p);
9889
9890 /* See if this copies the mem into a register that isn't
9891 modified afterwards. We'll try to do copy propagation
9892 a little further on. */
9893 if (set
9894 /* @@@ This test is _way_ too conservative. */
9895 && ! maybe_never
9896 && GET_CODE (SET_DEST (set)) == REG
9897 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9898 && REGNO (SET_DEST (set)) < last_max_reg
9899 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9900 && rtx_equal_p (SET_SRC (set), mem))
9901 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9902
9903 /* See if this copies the mem from a register that isn't
9904 modified afterwards. We'll try to remove the
9905 redundant copy later on by doing a little register
9906 renaming and copy propagation. This will help
9907 to untangle things for the BIV detection code. */
9908 if (set
9909 && ! maybe_never
9910 && GET_CODE (SET_SRC (set)) == REG
9911 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9912 && REGNO (SET_SRC (set)) < last_max_reg
9913 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9914 && rtx_equal_p (SET_DEST (set), mem))
9915 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9916
9917 /* If this is a call which uses / clobbers this memory
9918 location, we must not change the interface here. */
9919 if (GET_CODE (p) == CALL_INSN
9920 && reg_mentioned_p (loop_info->mems[i].mem,
9921 CALL_INSN_FUNCTION_USAGE (p)))
9922 {
9923 cancel_changes (0);
9924 loop_info->mems[i].optimize = 0;
9925 break;
9926 }
9927 else
9928 /* Replace the memory reference with the shadow register. */
9929 replace_loop_mems (p, loop_info->mems[i].mem,
9930 loop_info->mems[i].reg);
9931 }
9932
9933 if (GET_CODE (p) == CODE_LABEL
9934 || GET_CODE (p) == JUMP_INSN)
9935 maybe_never = 1;
9936 }
9937
9938 if (! loop_info->mems[i].optimize)
9939 ; /* We found we couldn't do the replacement, so do nothing. */
9940 else if (! apply_change_group ())
9941 /* We couldn't replace all occurrences of the MEM. */
9942 loop_info->mems[i].optimize = 0;
9943 else
9944 {
9945 /* Load the memory immediately before LOOP->START, which is
9946 the NOTE_LOOP_BEG. */
9947 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9948 rtx set;
9949 rtx best = mem;
9950 int j;
9951 struct elt_loc_list *const_equiv = 0;
9952
9953 if (e)
9954 {
9955 struct elt_loc_list *equiv;
9956 struct elt_loc_list *best_equiv = 0;
9957 for (equiv = e->locs; equiv; equiv = equiv->next)
9958 {
9959 if (CONSTANT_P (equiv->loc))
9960 const_equiv = equiv;
9961 else if (GET_CODE (equiv->loc) == REG
 9962 /* Extending hard register lifetimes causes crashes
 9963 on SRC targets. Doing so on non-SRC targets is
 9964 probably also not a good idea, since we most
 9965 probably have a pseudo-register equivalence as
 9966 well. */
9967 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9968 best_equiv = equiv;
9969 }
9970 /* Use the constant equivalence if that is cheap enough. */
9971 if (! best_equiv)
9972 best_equiv = const_equiv;
9973 else if (const_equiv
9974 && (rtx_cost (const_equiv->loc, SET)
9975 <= rtx_cost (best_equiv->loc, SET)))
9976 {
9977 best_equiv = const_equiv;
9978 const_equiv = 0;
9979 }
9980
9981 /* If best_equiv is nonzero, we know that MEM is set to a
9982 constant or register before the loop. We will use this
9983 knowledge to initialize the shadow register with that
9984 constant or reg rather than by loading from MEM. */
9985 if (best_equiv)
9986 best = copy_rtx (best_equiv->loc);
9987 }
9988
9989 set = gen_move_insn (reg, best);
9990 set = loop_insn_hoist (loop, set);
9991 if (REG_P (best))
9992 {
9993 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9994 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9995 {
9996 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9997 break;
9998 }
9999 }
10000
10001 if (const_equiv)
10002 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10003
10004 if (written)
10005 {
10006 if (label == NULL_RTX)
10007 {
10008 label = gen_label_rtx ();
10009 emit_label_after (label, loop->end);
10010 }
10011
10012 /* Store the memory immediately after END, which is
10013 the NOTE_LOOP_END. */
10014 set = gen_move_insn (copy_rtx (mem), reg);
10015 loop_insn_emit_after (loop, 0, label, set);
10016 }
10017
10018 if (loop_dump_stream)
10019 {
10020 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10021 REGNO (reg), (written ? "r/w" : "r/o"));
10022 print_rtl (loop_dump_stream, mem);
10023 fputc ('\n', loop_dump_stream);
10024 }
10025
10026 /* Attempt a bit of copy propagation. This helps untangle the
10027 data flow, and enables {basic,general}_induction_var to find
10028 more bivs/givs. */
10029 EXECUTE_IF_SET_IN_REG_SET
10030 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10031 {
10032 try_copy_prop (loop, reg, j);
10033 });
10034 CLEAR_REG_SET (&load_copies);
10035
10036 EXECUTE_IF_SET_IN_REG_SET
10037 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10038 {
10039 try_swap_copy_prop (loop, reg, j);
10040 });
10041 CLEAR_REG_SET (&store_copies);
10042 }
10043 }
10044
10045 if (label != NULL_RTX && end_label != NULL_RTX)
10046 {
10047 /* Now, we need to replace all references to the previous exit
10048 label with the new one. */
10049 rtx_pair rr;
10050 rr.r1 = end_label;
10051 rr.r2 = label;
10052
10053 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10054 {
10055 for_each_rtx (&p, replace_label, &rr);
10056
10057 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10058 field. This is not handled by for_each_rtx because it doesn't
10059 handle unprinted ('0') fields. We need to update JUMP_LABEL
10060 because the immediately following unroll pass will use it.
10061 replace_label would not work anyways, because that only handles
10062 LABEL_REFs. */
10063 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10064 JUMP_LABEL (p) = label;
10065 }
10066 }
10067
10068 cselib_finish ();
10069 }
10070
10071 /* For communication between note_reg_stored and its caller. */
10072 struct note_reg_stored_arg
10073 {
10074 int set_seen;
10075 rtx reg;
10076 };
10077
10078 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10079 is equal to ARG. */
10080 static void
10081 note_reg_stored (x, setter, arg)
10082 rtx x, setter ATTRIBUTE_UNUSED;
10083 void *arg;
10084 {
10085 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10086 if (t->reg == x)
10087 t->set_seen = 1;
10088 }
10089
10090 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10091 There must be exactly one insn that sets this pseudo; it will be
10092 deleted if all replacements succeed and we can prove that the register
10093 is not used after the loop. */
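 /* Editorial example (register numbers hypothetical): if the only set of
    pseudo 200 within the extended basic block is (set (reg 200) (reg 150))
    and REPLACEMENT is (reg 150), every later use of (reg 200) is rewritten
    to (reg 150); when that copy is both the first and the last reference
    to pseudo 200, the copy insn (or its whole libcall sequence) is
    deleted.  */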
10094
10095 static void
10096 try_copy_prop (loop, replacement, regno)
10097 const struct loop *loop;
10098 rtx replacement;
10099 unsigned int regno;
10100 {
10101 /* This is the reg that we are copying from. */
10102 rtx reg_rtx = regno_reg_rtx[regno];
10103 rtx init_insn = 0;
10104 rtx insn;
10105 /* These help keep track of whether we replaced all uses of the reg. */
10106 int replaced_last = 0;
10107 int store_is_first = 0;
10108
10109 for (insn = next_insn_in_loop (loop, loop->scan_start);
10110 insn != NULL_RTX;
10111 insn = next_insn_in_loop (loop, insn))
10112 {
10113 rtx set;
10114
10115 /* Only substitute within one extended basic block from the initializing
10116 insn. */
10117 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10118 break;
10119
10120 if (! INSN_P (insn))
10121 continue;
10122
10123 /* Is this the initializing insn? */
10124 set = single_set (insn);
10125 if (set
10126 && GET_CODE (SET_DEST (set)) == REG
10127 && REGNO (SET_DEST (set)) == regno)
10128 {
10129 if (init_insn)
10130 abort ();
10131
10132 init_insn = insn;
10133 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10134 store_is_first = 1;
10135 }
10136
10137 /* Only substitute after seeing the initializing insn. */
10138 if (init_insn && insn != init_insn)
10139 {
10140 struct note_reg_stored_arg arg;
10141
10142 replace_loop_regs (insn, reg_rtx, replacement);
10143 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10144 replaced_last = 1;
10145
10146 /* Stop replacing when REPLACEMENT is modified. */
10147 arg.reg = replacement;
10148 arg.set_seen = 0;
10149 note_stores (PATTERN (insn), note_reg_stored, &arg);
10150 if (arg.set_seen)
10151 {
10152 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10153
 10154 /* It is possible that we've turned a previously valid REG_EQUAL note
 10155 into an invalid one: we changed REGNO to REPLACEMENT, and since
 10156 REPLACEMENT is modified here, the note takes on a different meaning. */
10157 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10158 remove_note (insn, note);
10159 break;
10160 }
10161 }
10162 }
10163 if (! init_insn)
10164 abort ();
10165 if (apply_change_group ())
10166 {
10167 if (loop_dump_stream)
10168 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10169 if (store_is_first && replaced_last)
10170 {
10171 rtx first;
10172 rtx retval_note;
10173
10174 /* Assume we're just deleting INIT_INSN. */
10175 first = init_insn;
10176 /* Look for REG_RETVAL note. If we're deleting the end of
10177 the libcall sequence, the whole sequence can go. */
10178 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10179 /* If we found a REG_RETVAL note, find the first instruction
10180 in the sequence. */
10181 if (retval_note)
10182 first = XEXP (retval_note, 0);
10183
10184 /* Delete the instructions. */
10185 loop_delete_insns (first, init_insn);
10186 }
10187 if (loop_dump_stream)
10188 fprintf (loop_dump_stream, ".\n");
10189 }
10190 }
10191
10192 /* Replace all the instructions from FIRST up to and including LAST
10193 with NOTE_INSN_DELETED notes. */
10194
10195 static void
10196 loop_delete_insns (first, last)
10197 rtx first;
10198 rtx last;
10199 {
10200 while (1)
10201 {
10202 if (loop_dump_stream)
10203 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10204 INSN_UID (first));
10205 delete_insn (first);
10206
 10207 /* If this was the LAST instruction we're supposed to delete,
10208 we're done. */
10209 if (first == last)
10210 break;
10211
10212 first = NEXT_INSN (first);
10213 }
10214 }
10215
10216 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10217 loop LOOP if the order of the sets of these registers can be
10218 swapped. There must be exactly one insn within the loop that sets
10219 this pseudo followed immediately by a move insn that sets
10220 REPLACEMENT with REGNO. */
10221 static void
10222 try_swap_copy_prop (loop, replacement, regno)
10223 const struct loop *loop;
10224 rtx replacement;
10225 unsigned int regno;
10226 {
10227 rtx insn;
10228 rtx set = NULL_RTX;
10229 unsigned int new_regno;
10230
10231 new_regno = REGNO (replacement);
10232
10233 for (insn = next_insn_in_loop (loop, loop->scan_start);
10234 insn != NULL_RTX;
10235 insn = next_insn_in_loop (loop, insn))
10236 {
 10237 /* Search for the insn that copies REGNO to NEW_REGNO. */
10238 if (INSN_P (insn)
10239 && (set = single_set (insn))
10240 && GET_CODE (SET_DEST (set)) == REG
10241 && REGNO (SET_DEST (set)) == new_regno
10242 && GET_CODE (SET_SRC (set)) == REG
10243 && REGNO (SET_SRC (set)) == regno)
10244 break;
10245 }
10246
10247 if (insn != NULL_RTX)
10248 {
10249 rtx prev_insn;
10250 rtx prev_set;
10251
10252 /* Some DEF-USE info would come in handy here to make this
10253 function more general. For now, just check the previous insn
10254 which is the most likely candidate for setting REGNO. */
10255
10256 prev_insn = PREV_INSN (insn);
10257
 10258 if (INSN_P (prev_insn)
10259 && (prev_set = single_set (prev_insn))
10260 && GET_CODE (SET_DEST (prev_set)) == REG
10261 && REGNO (SET_DEST (prev_set)) == regno)
10262 {
10263 /* We have:
10264 (set (reg regno) (expr))
10265 (set (reg new_regno) (reg regno))
10266
10267 so try converting this to:
10268 (set (reg new_regno) (expr))
10269 (set (reg regno) (reg new_regno))
10270
10271 The former construct is often generated when a global
10272 variable used for an induction variable is shadowed by a
10273 register (NEW_REGNO). The latter construct improves the
10274 chances of GIV replacement and BIV elimination. */
10275
10276 validate_change (prev_insn, &SET_DEST (prev_set),
10277 replacement, 1);
10278 validate_change (insn, &SET_DEST (set),
10279 SET_SRC (set), 1);
10280 validate_change (insn, &SET_SRC (set),
10281 replacement, 1);
10282
10283 if (apply_change_group ())
10284 {
10285 if (loop_dump_stream)
10286 fprintf (loop_dump_stream,
10287 " Swapped set of reg %d at %d with reg %d at %d.\n",
10288 regno, INSN_UID (insn),
10289 new_regno, INSN_UID (prev_insn));
10290
10291 /* Update first use of REGNO. */
10292 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10293 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10294
10295 /* Now perform copy propagation to hopefully
10296 remove all uses of REGNO within the loop. */
10297 try_copy_prop (loop, replacement, regno);
10298 }
10299 }
10300 }
10301 }
10302
10303 /* Replace MEM with its associated pseudo register. This function is
10304 called from load_mems via for_each_rtx. DATA is actually a pointer
10305 to a structure describing the instruction currently being scanned
10306 and the MEM we are currently replacing. */
10307
10308 static int
10309 replace_loop_mem (mem, data)
10310 rtx *mem;
10311 void *data;
10312 {
10313 loop_replace_args *args = (loop_replace_args *) data;
10314 rtx m = *mem;
10315
10316 if (m == NULL_RTX)
10317 return 0;
10318
10319 switch (GET_CODE (m))
10320 {
10321 case MEM:
10322 break;
10323
10324 case CONST_DOUBLE:
10325 /* We're not interested in the MEM associated with a
10326 CONST_DOUBLE, so there's no need to traverse into one. */
10327 return -1;
10328
10329 default:
10330 /* This is not a MEM. */
10331 return 0;
10332 }
10333
10334 if (!rtx_equal_p (args->match, m))
10335 /* This is not the MEM we are currently replacing. */
10336 return 0;
10337
10338 /* Actually replace the MEM. */
10339 validate_change (args->insn, mem, args->replacement, 1);
10340
10341 return 0;
10342 }
10343
10344 static void
10345 replace_loop_mems (insn, mem, reg)
10346 rtx insn;
10347 rtx mem;
10348 rtx reg;
10349 {
10350 loop_replace_args args;
10351
10352 args.insn = insn;
10353 args.match = mem;
10354 args.replacement = reg;
10355
10356 for_each_rtx (&insn, replace_loop_mem, &args);
10357 }
10358
10359 /* Replace one register with another. Called through for_each_rtx; PX points
10360 to the rtx being scanned. DATA is actually a pointer to
10361 a structure of arguments. */
10362
10363 static int
10364 replace_loop_reg (px, data)
10365 rtx *px;
10366 void *data;
10367 {
10368 rtx x = *px;
10369 loop_replace_args *args = (loop_replace_args *) data;
10370
10371 if (x == NULL_RTX)
10372 return 0;
10373
10374 if (x == args->match)
10375 validate_change (args->insn, px, args->replacement, 1);
10376
10377 return 0;
10378 }
10379
10380 static void
10381 replace_loop_regs (insn, reg, replacement)
10382 rtx insn;
10383 rtx reg;
10384 rtx replacement;
10385 {
10386 loop_replace_args args;
10387
10388 args.insn = insn;
10389 args.match = reg;
10390 args.replacement = replacement;
10391
10392 for_each_rtx (&insn, replace_loop_reg, &args);
10393 }
10394
10395 /* Replace occurrences of the old exit label for the loop with the new
10396 one. DATA is an rtx_pair containing the old and new labels,
10397 respectively. */
10398
10399 static int
10400 replace_label (x, data)
10401 rtx *x;
10402 void *data;
10403 {
10404 rtx l = *x;
10405 rtx old_label = ((rtx_pair *) data)->r1;
10406 rtx new_label = ((rtx_pair *) data)->r2;
10407
10408 if (l == NULL_RTX)
10409 return 0;
10410
10411 if (GET_CODE (l) != LABEL_REF)
10412 return 0;
10413
10414 if (XEXP (l, 0) != old_label)
10415 return 0;
10416
10417 XEXP (l, 0) = new_label;
10418 ++LABEL_NUSES (new_label);
10419 --LABEL_NUSES (old_label);
10420
10421 return 0;
10422 }
10423 \f
10424 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10425 (ignored in the interim). */
10426
10427 static rtx
10428 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10429 const struct loop *loop ATTRIBUTE_UNUSED;
10430 basic_block where_bb ATTRIBUTE_UNUSED;
10431 rtx where_insn;
10432 rtx pattern;
10433 {
10434 return emit_insn_after (pattern, where_insn);
10435 }
10436
10437
 10438 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
 10439 in basic block WHERE_BB (ignored in the interim) within the loop;
 10440 otherwise hoist PATTERN into the loop pre-header. */
10441
10442 rtx
10443 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10444 const struct loop *loop;
10445 basic_block where_bb ATTRIBUTE_UNUSED;
10446 rtx where_insn;
10447 rtx pattern;
10448 {
10449 if (! where_insn)
10450 return loop_insn_hoist (loop, pattern);
10451 return emit_insn_before (pattern, where_insn);
10452 }
10453
10454
10455 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10456 WHERE_BB (ignored in the interim) within the loop. */
10457
10458 static rtx
10459 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10460 const struct loop *loop ATTRIBUTE_UNUSED;
10461 basic_block where_bb ATTRIBUTE_UNUSED;
10462 rtx where_insn;
10463 rtx pattern;
10464 {
10465 return emit_call_insn_before (pattern, where_insn);
10466 }
10467
10468
10469 /* Hoist insn for PATTERN into the loop pre-header. */
10470
10471 rtx
10472 loop_insn_hoist (loop, pattern)
10473 const struct loop *loop;
10474 rtx pattern;
10475 {
10476 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10477 }
10478
10479
10480 /* Hoist call insn for PATTERN into the loop pre-header. */
10481
10482 static rtx
10483 loop_call_insn_hoist (loop, pattern)
10484 const struct loop *loop;
10485 rtx pattern;
10486 {
10487 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10488 }
10489
10490
10491 /* Sink insn for PATTERN after the loop end. */
10492
10493 rtx
10494 loop_insn_sink (loop, pattern)
10495 const struct loop *loop;
10496 rtx pattern;
10497 {
10498 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10499 }
10500
 10501 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
 10502 and a constant. Emit a sequence of instructions to load it into REG. */
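 /* For instance (an editorial sketch): if final_value is
    (plus (reg 60) (const_int 40)), force_operand emits the addition insns
    needed to compute it, targeting REG, and a final move is added only
    when the result did not already land in REG.  */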
10503 static rtx
10504 gen_load_of_final_value (reg, final_value)
10505 rtx reg, final_value;
10506 {
10507 rtx seq;
10508 start_sequence ();
10509 final_value = force_operand (final_value, reg);
10510 if (final_value != reg)
10511 emit_move_insn (reg, final_value);
10512 seq = get_insns ();
10513 end_sequence ();
10514 return seq;
10515 }
10516
10517 /* If the loop has multiple exits, emit insn for PATTERN before the
10518 loop to ensure that it will always be executed no matter how the
10519 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10520 since this is slightly more efficient. */
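 /* Editorial note: for example, an insn computing an induction variable's
    final value must execute no matter which exit is taken, so with a
    nonzero exit_count it is hoisted into the pre-header; when exit_count
    is zero the only exit is the normal fall-through, so the insn can
    simply be sunk after the loop.  */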
10521
10522 static rtx
10523 loop_insn_sink_or_swim (loop, pattern)
10524 const struct loop *loop;
10525 rtx pattern;
10526 {
10527 if (loop->exit_count)
10528 return loop_insn_hoist (loop, pattern);
10529 else
10530 return loop_insn_sink (loop, pattern);
10531 }
10532 \f
10533 static void
10534 loop_ivs_dump (loop, file, verbose)
10535 const struct loop *loop;
10536 FILE *file;
10537 int verbose;
10538 {
10539 struct iv_class *bl;
10540 int iv_num = 0;
10541
10542 if (! loop || ! file)
10543 return;
10544
10545 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10546 iv_num++;
10547
10548 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10549
10550 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10551 {
10552 loop_iv_class_dump (bl, file, verbose);
10553 fputc ('\n', file);
10554 }
10555 }
10556
10557
10558 static void
10559 loop_iv_class_dump (bl, file, verbose)
10560 const struct iv_class *bl;
10561 FILE *file;
10562 int verbose ATTRIBUTE_UNUSED;
10563 {
10564 struct induction *v;
10565 rtx incr;
10566 int i;
10567
10568 if (! bl || ! file)
10569 return;
10570
10571 fprintf (file, "IV class for reg %d, benefit %d\n",
10572 bl->regno, bl->total_benefit);
10573
10574 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10575 if (bl->initial_value)
10576 {
10577 fprintf (file, ", init val: ");
10578 print_simple_rtl (file, bl->initial_value);
10579 }
10580 if (bl->initial_test)
10581 {
10582 fprintf (file, ", init test: ");
10583 print_simple_rtl (file, bl->initial_test);
10584 }
10585 fputc ('\n', file);
10586
10587 if (bl->final_value)
10588 {
10589 fprintf (file, " Final val: ");
10590 print_simple_rtl (file, bl->final_value);
10591 fputc ('\n', file);
10592 }
10593
10594 if ((incr = biv_total_increment (bl)))
10595 {
10596 fprintf (file, " Total increment: ");
10597 print_simple_rtl (file, incr);
10598 fputc ('\n', file);
10599 }
10600
10601 /* List the increments. */
10602 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10603 {
10604 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10605 print_simple_rtl (file, v->add_val);
10606 fputc ('\n', file);
10607 }
10608
10609 /* List the givs. */
10610 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10611 {
10612 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10613 i, INSN_UID (v->insn), v->benefit);
10614 if (v->giv_type == DEST_ADDR)
10615 print_simple_rtl (file, v->mem);
10616 else
10617 print_simple_rtl (file, single_set (v->insn));
10618 fputc ('\n', file);
10619 }
10620 }
10621
10622
10623 static void
10624 loop_biv_dump (v, file, verbose)
10625 const struct induction *v;
10626 FILE *file;
10627 int verbose;
10628 {
10629 if (! v || ! file)
10630 return;
10631
10632 fprintf (file,
10633 "Biv %d: insn %d",
10634 REGNO (v->dest_reg), INSN_UID (v->insn));
10635 fprintf (file, " const ");
10636 print_simple_rtl (file, v->add_val);
10637
10638 if (verbose && v->final_value)
10639 {
10640 fputc ('\n', file);
10641 fprintf (file, " final ");
10642 print_simple_rtl (file, v->final_value);
10643 }
10644
10645 fputc ('\n', file);
10646 }
10647
10648
10649 static void
10650 loop_giv_dump (v, file, verbose)
10651 const struct induction *v;
10652 FILE *file;
10653 int verbose;
10654 {
10655 if (! v || ! file)
10656 return;
10657
10658 if (v->giv_type == DEST_REG)
10659 fprintf (file, "Giv %d: insn %d",
10660 REGNO (v->dest_reg), INSN_UID (v->insn));
10661 else
10662 fprintf (file, "Dest address: insn %d",
10663 INSN_UID (v->insn));
10664
10665 fprintf (file, " src reg %d benefit %d",
10666 REGNO (v->src_reg), v->benefit);
10667 fprintf (file, " lifetime %d",
10668 v->lifetime);
10669
10670 if (v->replaceable)
10671 fprintf (file, " replaceable");
10672
10673 if (v->no_const_addval)
10674 fprintf (file, " ncav");
10675
10676 if (v->ext_dependent)
10677 {
10678 switch (GET_CODE (v->ext_dependent))
10679 {
10680 case SIGN_EXTEND:
10681 fprintf (file, " ext se");
10682 break;
10683 case ZERO_EXTEND:
10684 fprintf (file, " ext ze");
10685 break;
10686 case TRUNCATE:
10687 fprintf (file, " ext tr");
10688 break;
10689 default:
10690 abort ();
10691 }
10692 }
10693
10694 fputc ('\n', file);
10695 fprintf (file, " mult ");
10696 print_simple_rtl (file, v->mult_val);
10697
10698 fputc ('\n', file);
10699 fprintf (file, " add ");
10700 print_simple_rtl (file, v->add_val);
10701
10702 if (verbose && v->final_value)
10703 {
10704 fputc ('\n', file);
10705 fprintf (file, " final ");
10706 print_simple_rtl (file, v->final_value);
10707 }
10708
10709 fputc ('\n', file);
10710 }
10711
10712
10713 void
10714 debug_ivs (loop)
10715 const struct loop *loop;
10716 {
10717 loop_ivs_dump (loop, stderr, 1);
10718 }
10719
10720
10721 void
10722 debug_iv_class (bl)
10723 const struct iv_class *bl;
10724 {
10725 loop_iv_class_dump (bl, stderr, 1);
10726 }
10727
10728
10729 void
10730 debug_biv (v)
10731 const struct induction *v;
10732 {
10733 loop_biv_dump (v, stderr, 1);
10734 }
10735
10736
10737 void
10738 debug_giv (v)
10739 const struct induction *v;
10740 {
10741 loop_giv_dump (v, stderr, 1);
10742 }
10743
10744
10745 #define LOOP_BLOCK_NUM_1(INSN) \
10746 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10747
10748 /* The notes do not have an assigned block, so look at the next insn. */
10749 #define LOOP_BLOCK_NUM(INSN) \
10750 ((INSN) ? (GET_CODE (INSN) == NOTE \
10751 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10752 : LOOP_BLOCK_NUM_1 (INSN)) \
10753 : -1)
10754
10755 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10756
10757 static void
10758 loop_dump_aux (loop, file, verbose)
10759 const struct loop *loop;
10760 FILE *file;
10761 int verbose ATTRIBUTE_UNUSED;
10762 {
10763 rtx label;
10764
10765 if (! loop || ! file)
10766 return;
10767
10768 /* Print diagnostics to compare our concept of a loop with
10769 what the loop notes say. */
10770 if (! PREV_INSN (loop->first->head)
10771 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10772 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10773 != NOTE_INSN_LOOP_BEG)
10774 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10775 INSN_UID (PREV_INSN (loop->first->head)));
10776 if (! NEXT_INSN (loop->last->end)
10777 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10778 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10779 != NOTE_INSN_LOOP_END)
10780 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10781 INSN_UID (NEXT_INSN (loop->last->end)));
10782
10783 if (loop->start)
10784 {
10785 fprintf (file,
10786 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10787 LOOP_BLOCK_NUM (loop->start),
10788 LOOP_INSN_UID (loop->start),
10789 LOOP_BLOCK_NUM (loop->cont),
10790 LOOP_INSN_UID (loop->cont),
10791 LOOP_BLOCK_NUM (loop->cont),
10792 LOOP_INSN_UID (loop->cont),
10793 LOOP_BLOCK_NUM (loop->vtop),
10794 LOOP_INSN_UID (loop->vtop),
10795 LOOP_BLOCK_NUM (loop->end),
10796 LOOP_INSN_UID (loop->end));
10797 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10798 LOOP_BLOCK_NUM (loop->top),
10799 LOOP_INSN_UID (loop->top),
10800 LOOP_BLOCK_NUM (loop->scan_start),
10801 LOOP_INSN_UID (loop->scan_start));
10802 fprintf (file, ";; exit_count %d", loop->exit_count);
10803 if (loop->exit_count)
10804 {
10805 fputs (", labels:", file);
10806 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10807 {
10808 fprintf (file, " %d ",
10809 LOOP_INSN_UID (XEXP (label, 0)));
10810 }
10811 }
10812 fputs ("\n", file);
10813
10814 /* This can happen when a marked loop appears as two nested loops,
10815 say from while (a || b) {}. The inner loop won't match
10816 the loop markers but the outer one will. */
10817 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10818 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10819 }
10820 }
10821
10822 /* Call this function from the debugger to dump LOOP. */
10823
10824 void
10825 debug_loop (loop)
10826 const struct loop *loop;
10827 {
10828 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10829 }
10830
10831 /* Call this function from the debugger to dump LOOPS. */
10832
10833 void
10834 debug_loops (loops)
10835 const struct loops *loops;
10836 {
10837 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
10838 }