/* Perform instruction reorganizations for delay slot filling.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
   Hacked by Michael Tiemann (tiemann@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction reorganization pass.

   This pass runs after register allocation and final jump
   optimization.  It should be the last pass to run before peephole.
   It serves primarily to fill delay slots of insns, typically branch
   and call insns.  Other insns typically involve more complicated
   interactions of data dependencies and resource constraints, and
   are better handled by scheduling before register allocation (by the
   function `schedule_insns').

   The Branch Penalty is the number of extra cycles that are needed to
   execute a branch insn.  On an ideal machine, branches take a single
   cycle, and the Branch Penalty is 0.  Several RISC machines approach
   branch delays differently:

   The MIPS has a single branch delay slot.  Most insns
   (except other branches) can be used to fill this slot.  When the
   slot is filled, two insns execute in two cycles, reducing the
   branch penalty to zero.
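
   For illustration only (an editor's sketch; the mnemonics are
   schematic rather than the output of any particular compiler),
   filling the MIPS slot turns

	addiu $4,$4,1
	beq   $2,$0,L2
	nop			# unfilled delay slot

   into

	beq   $2,$0,L2
	addiu $4,$4,1		# now executes in the delay slot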

   The SPARC always has a branch delay slot, but its effects can be
   annulled when the branch is not taken.  This means that, failing to
   find other sources of insns, we can hoist an insn from the branch
   target that would only be safe to execute knowing that the branch
   is taken.

   The HP-PA always has a branch delay slot.  For unconditional branches
   its effects can be annulled when the branch is taken.  The effects
   of the delay slot in a conditional branch can be nullified for forward
   taken branches, or for untaken backward branches.  This means
   we can hoist insns from the fall-through path for forward branches or
   steal insns from the target of backward branches.

   The TMS320C3x and C4x have three branch delay slots.  When the three
   slots are filled, the branch penalty is zero.  Most insns can fill the
   delay slots except jump insns.

   Three techniques for filling delay slots have been implemented so far:

   (1) `fill_simple_delay_slots' is the simplest, most efficient way
   to fill delay slots.  This pass first looks for insns which come
   from before the branch and which are safe to execute after the
   branch.  Then it searches after the insn requiring delay slots or,
   in the case of a branch, for insns that are after the point at
   which the branch merges into the fallthrough code, if such a point
   exists.  When such insns are found, the branch penalty decreases
   and no code expansion takes place.

   (2) `fill_eager_delay_slots' is more complicated: it is used for
   scheduling conditional jumps, or for scheduling jumps which cannot
   be filled using (1).  A machine need not have annulled jumps to use
   this strategy, but it helps (by keeping more options open).
   `fill_eager_delay_slots' tries to guess the direction the branch
   will go; if it guesses right 100% of the time, it can reduce the
   branch penalty as much as `fill_simple_delay_slots' does.  If it
   guesses wrong 100% of the time, it might as well schedule nops.  When
   `fill_eager_delay_slots' takes insns from the fall-through path of
   the jump, usually there is no code expansion; when it takes insns
   from the branch target, there is code expansion if it is not the
   only way to reach that target.

   (3) `relax_delay_slots' uses a set of rules to simplify code that
   has been reorganized by (1) and (2).  It finds cases where a
   conditional test can be eliminated, jumps can be threaded, extra
   insns can be eliminated, etc.  It is the job of (1) and (2) to do a
   good job of scheduling locally; `relax_delay_slots' takes care of
   making the various individual schedules work well together.  It is
   especially tuned to handle the control flow interactions of branch
   insns.  It does nothing for insns with delay slots that do not
   branch.

   On machines that use CC0, we are very conservative.  We will not make
   a copy of an insn involving CC0 since we want to maintain a 1-1
   correspondence between the insn that sets CC0 and the insn that uses
   it.  The insns are allowed to be separated by placing an insn that
   sets CC0 (but not an insn that uses CC0; we could do this, but it
   doesn't seem worthwhile) in a delay slot.  In that case, we point each
   insn at the other with REG_CC_USER and REG_CC_SETTER notes.  Note that
   these restrictions affect very few machines because most RISC machines
   with delay slots will not use CC0 (the RT is the only known exception
   at this point).

   Not yet implemented:

   The Acorn RISC Machine can conditionally execute most insns, so
   it is profitable to move single insns into a position to execute
   based on the condition code of the previous insn.

   The HP-PA can conditionally nullify insns, providing a similar
   effect to the ARM, differing mostly in which insn is "in charge".  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "expr.h"
#include "function.h"
#include "insn-config.h"
#include "conditions.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "output.h"
#include "obstack.h"
#include "insn-attr.h"
#include "resource.h"
#include "except.h"
#include "params.h"
#include "timevar.h"
#include "target.h"
#include "tree-pass.h"

#ifdef DELAY_SLOTS

#ifndef ANNUL_IFTRUE_SLOTS
#define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
#endif
#ifndef ANNUL_IFFALSE_SLOTS
#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
#endif

/* Insns which have delay slots that have not yet been filled.  */

static struct obstack unfilled_slots_obstack;
static rtx *unfilled_firstobj;

/* Define macros to refer to the first and last slot containing unfilled
   insns.  These are used because the list may move and its address
   should be recomputed at each use.  */

#define unfilled_slots_base \
  ((rtx *) obstack_base (&unfilled_slots_obstack))

#define unfilled_slots_next \
  ((rtx *) obstack_next_free (&unfilled_slots_obstack))
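
/* A hedged usage sketch (editor's addition, not part of the pass's own
   logic): an insn still needing slots is queued with

       obstack_ptr_grow (&unfilled_slots_obstack, insn);

   and a consumer walks the live window as

       rtx *p;
       for (p = unfilled_slots_base; p < unfilled_slots_next; p++)
	 if (*p)
	   ...use *p...

   The macros above are re-evaluated at every use because growing the
   obstack may move the underlying array.  */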

/* Points to the label before the end of the function.  */
static rtx end_of_function_label;

/* Mapping between INSN_UID's and position in the code since INSN_UID's do
   not always monotonically increase.  */
static int *uid_to_ruid;

/* Highest valid index in `uid_to_ruid'.  */
static int max_uid;

static int stop_search_p (rtx, int);
static int resource_conflicts_p (struct resources *, struct resources *);
static int insn_references_resource_p (rtx, struct resources *, bool);
static int insn_sets_resource_p (rtx, struct resources *, bool);
static rtx find_end_label (void);
static rtx emit_delay_sequence (rtx, rtx, int);
static rtx add_to_delay_list (rtx, rtx);
static rtx delete_from_delay_slot (rtx);
static void delete_scheduled_jump (rtx);
static void note_delay_statistics (int, int);
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
static rtx optimize_skip (rtx);
#endif
static int get_jump_flags (rtx, rtx);
static int rare_destination (rtx);
static int mostly_true_jump (rtx, rtx);
static rtx get_branch_condition (rtx, rtx);
static int condition_dominates_p (rtx, rtx);
static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
static int check_annul_list_true_false (int, rtx);
static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx,
					 struct resources *,
					 struct resources *,
					 struct resources *,
					 int, int *, int *, rtx *);
static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx,
					      struct resources *,
					      struct resources *,
					      struct resources *,
					      int, int *, int *);
static void try_merge_delay_insns (rtx, rtx);
static rtx redundant_insn (rtx, rtx, rtx);
static int own_thread_p (rtx, rtx, int);
static void update_block (rtx, rtx);
static int reorg_redirect_jump (rtx, rtx);
static void update_reg_dead_notes (rtx, rtx);
static void fix_reg_dead_note (rtx, rtx);
static void update_reg_unused_notes (rtx, rtx);
static void fill_simple_delay_slots (int);
static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx,
				   int, int, int, int,
				   int *, rtx);
static void fill_eager_delay_slots (void);
static void relax_delay_slots (rtx);
#ifdef HAVE_return
static void make_return_insns (rtx);
#endif
\f
/* Return TRUE if this insn should stop the search for insns to fill delay
   slots.  LABELS_P indicates that labels should terminate the search.
   In all cases, jumps terminate the search.  */

static int
stop_search_p (rtx insn, int labels_p)
{
  if (insn == 0)
    return 1;

  /* If the insn can throw an exception that is caught within the function,
     it may effectively perform a jump from the viewpoint of the function.
     Therefore act like for a jump.  */
  if (can_throw_internal (insn))
    return 1;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case CALL_INSN:
      return 0;

    case CODE_LABEL:
      return labels_p;

    case JUMP_INSN:
    case BARRIER:
      return 1;

    case INSN:
      /* OK unless it contains a delay slot or is an `asm' insn of some type.
	 We don't know anything about these.  */
      return (GET_CODE (PATTERN (insn)) == SEQUENCE
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0);

    default:
      gcc_unreachable ();
    }
}
\f
/* Return TRUE if any resources are marked in both RES1 and RES2 or if
   either resource set contains a volatile memory reference.  Otherwise,
   return FALSE.  */

static int
resource_conflicts_p (struct resources *res1, struct resources *res2)
{
  if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
      || (res1->unch_memory && res2->unch_memory)
      || res1->volatil || res2->volatil)
    return 1;

#ifdef HARD_REG_SET
  return (res1->regs & res2->regs) != HARD_CONST (0);
#else
  {
    int i;

    for (i = 0; i < HARD_REG_SET_LONGS; i++)
      if ((res1->regs[i] & res2->regs[i]) != 0)
	return 1;
    return 0;
  }
#endif
}

/* Return TRUE if any resource marked in RES, a `struct resources', is
   referenced by INSN.  If INCLUDE_DELAYED_EFFECTS is set, also return
   TRUE if the called routine uses those resources.

   We compute this by computing all the resources referenced by INSN and
   seeing if this conflicts with RES.  It might be faster to directly check
   ourselves, and this is the way it used to work, but it means duplicating
   a large block of complex code.  */

static int
insn_references_resource_p (rtx insn, struct resources *res,
			    bool include_delayed_effects)
{
  struct resources insn_res;

  CLEAR_RESOURCE (&insn_res);
  mark_referenced_resources (insn, &insn_res, include_delayed_effects);
  return resource_conflicts_p (&insn_res, res);
}

/* Return TRUE if INSN modifies resources that are marked in RES.
   INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
   included.  CC0 is only modified if it is explicitly set; see comments
   in front of mark_set_resources for details.  */

static int
insn_sets_resource_p (rtx insn, struct resources *res,
		      bool include_delayed_effects)
{
  struct resources insn_sets;

  CLEAR_RESOURCE (&insn_sets);
  mark_set_resources (insn, &insn_sets, 0,
		      (include_delayed_effects
		       ? MARK_SRC_DEST_CALL
		       : MARK_SRC_DEST));
  return resource_conflicts_p (&insn_sets, res);
}
\f
/* Find a label at the end of the function or before a RETURN.  If there
   is none, try to make one.  If that fails, returns 0.

   The property of such a label is that it is placed just before the
   epilogue or a bare RETURN insn, so that another bare RETURN can be
   turned into a jump to the label unconditionally.  In particular, the
   label cannot be placed before a RETURN insn with a filled delay slot.

   ??? There may be a problem with the current implementation.  Suppose
   we start with a bare RETURN insn and call find_end_label.  It may set
   end_of_function_label just before the RETURN.  Suppose the machinery
   is able to fill the delay slot of the RETURN insn afterwards.  Then
   end_of_function_label is no longer valid according to the property
   described above and find_end_label will still return it unmodified.
   Note that this is probably mitigated by the following observation:
   once end_of_function_label is made, it is very likely the target of
   a jump, so filling the delay slot of the RETURN will be much more
   difficult.  */

static rtx
find_end_label (void)
{
  rtx insn;

  /* If we found one previously, return it.  */
  if (end_of_function_label)
    return end_of_function_label;

  /* Otherwise, see if there is a label at the end of the function.  If there
     is, it must be that RETURN insns aren't needed, so that is our return
     label and we don't have to do anything else.  */

  insn = get_last_insn ();
  while (NOTE_P (insn)
	 || (NONJUMP_INSN_P (insn)
	     && (GET_CODE (PATTERN (insn)) == USE
		 || GET_CODE (PATTERN (insn)) == CLOBBER)))
    insn = PREV_INSN (insn);

  /* When a target threads its epilogue we might already have a
     suitable return insn.  If so put a label before it for the
     end_of_function_label.  */
  if (BARRIER_P (insn)
      && JUMP_P (PREV_INSN (insn))
      && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
    {
      rtx temp = PREV_INSN (PREV_INSN (insn));
      end_of_function_label = gen_label_rtx ();
      LABEL_NUSES (end_of_function_label) = 0;

      /* Put the label before any USE insns that may precede the RETURN
	 insn.  */
      while (GET_CODE (temp) == USE)
	temp = PREV_INSN (temp);

      emit_label_after (end_of_function_label, temp);
    }

  else if (LABEL_P (insn))
    end_of_function_label = insn;
  else
    {
      end_of_function_label = gen_label_rtx ();
      LABEL_NUSES (end_of_function_label) = 0;
      /* If the basic block reorder pass moves the return insn to
	 some other place try to locate it again and put our
	 end_of_function_label there.  */
      while (insn && ! (JUMP_P (insn)
			&& (GET_CODE (PATTERN (insn)) == RETURN)))
	insn = PREV_INSN (insn);
      if (insn)
	{
	  insn = PREV_INSN (insn);

	  /* Put the label before any USE insns that may precede the
	     RETURN insn.  */
	  while (GET_CODE (insn) == USE)
	    insn = PREV_INSN (insn);

	  emit_label_after (end_of_function_label, insn);
	}
      else
	{
#ifdef HAVE_epilogue
	  if (HAVE_epilogue
#ifdef HAVE_return
	      && ! HAVE_return
#endif
	      )
	    {
	      /* The RETURN insn has its delay slot filled so we cannot
		 emit the label just before it.  Since we already have
		 an epilogue and cannot emit a new RETURN, we cannot
		 emit the label at all.  */
	      end_of_function_label = NULL_RTX;
	      return end_of_function_label;
	    }
#endif /* HAVE_epilogue */

	  /* Otherwise, make a new label and emit a RETURN and BARRIER,
	     if needed.  */
	  emit_label (end_of_function_label);
#ifdef HAVE_return
	  /* We don't bother trying to create a return insn if the
	     epilogue has filled delay-slots; we would have to try and
	     move the delay-slot fillers to the delay-slots for the new
	     return insn or in front of the new return insn.  */
	  if (crtl->epilogue_delay_list == NULL
	      && HAVE_return)
	    {
	      /* The return we make may have delay slots too.  */
	      rtx insn = gen_return ();
	      insn = emit_jump_insn (insn);
	      emit_barrier ();
	      if (num_delay_slots (insn) > 0)
		obstack_ptr_grow (&unfilled_slots_obstack, insn);
	    }
#endif
	}
    }

  /* Show one additional use for this label so it won't go away until
     we are done.  */
  ++LABEL_NUSES (end_of_function_label);

  return end_of_function_label;
}
\f
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   the pattern of INSN with the SEQUENCE.

   Chain the insns so that NEXT_INSN of each insn in the sequence points to
   the next and NEXT_INSN of the last insn in the sequence points to
   the first insn after the sequence.  Similarly for PREV_INSN.  This makes
   it easier to scan all insns.

   Returns the SEQUENCE that replaces INSN.  */

static rtx
emit_delay_sequence (rtx insn, rtx list, int length)
{
  int i = 1;
  rtx li;
  int had_barrier = 0;

  /* Allocate the rtvec to hold the insns and the SEQUENCE.  */
  rtvec seqv = rtvec_alloc (length + 1);
  rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
  rtx seq_insn = make_insn_raw (seq);
  rtx first = get_insns ();
  rtx last = get_last_insn ();

  /* Make a copy of the insn having delay slots.  */
  rtx delay_insn = copy_rtx (insn);

  /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
     confuse further processing.  Update LAST in case it was the last insn.
     We will put the BARRIER back in later.  */
  if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn)))
    {
      delete_related_insns (NEXT_INSN (insn));
      last = get_last_insn ();
      had_barrier = 1;
    }

  /* Splice our SEQUENCE into the insn stream where INSN used to be.  */
  NEXT_INSN (seq_insn) = NEXT_INSN (insn);
  PREV_INSN (seq_insn) = PREV_INSN (insn);

  if (insn != last)
    PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;

  if (insn != first)
    NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;

  /* Note the calls to set_new_first_and_last_insn must occur after
     SEQ_INSN has been completely spliced into the insn stream.

     Otherwise CUR_INSN_UID will get set to an incorrect value because
     set_new_first_and_last_insn will not find SEQ_INSN in the chain.  */
  if (insn == last)
    set_new_first_and_last_insn (first, seq_insn);

  if (insn == first)
    set_new_first_and_last_insn (seq_insn, last);

  /* Build our SEQUENCE and rebuild the insn chain.  */
  XVECEXP (seq, 0, 0) = delay_insn;
  INSN_DELETED_P (delay_insn) = 0;
  PREV_INSN (delay_insn) = PREV_INSN (seq_insn);

  INSN_LOCATOR (seq_insn) = INSN_LOCATOR (delay_insn);

  for (li = list; li; li = XEXP (li, 1), i++)
    {
      rtx tem = XEXP (li, 0);
      rtx note, next;

      /* Show that this copy of the insn isn't deleted.  */
      INSN_DELETED_P (tem) = 0;

      XVECEXP (seq, 0, i) = tem;
      PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
      NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;

      /* The SPARC assembler, for instance, emits a warning when debug
	 info is output into the delay slot.  */
      if (INSN_LOCATOR (tem) && !INSN_LOCATOR (seq_insn))
	INSN_LOCATOR (seq_insn) = INSN_LOCATOR (tem);
      INSN_LOCATOR (tem) = 0;

      for (note = REG_NOTES (tem); note; note = next)
	{
	  next = XEXP (note, 1);
	  switch (REG_NOTE_KIND (note))
	    {
	    case REG_DEAD:
	      /* Remove any REG_DEAD notes because we can't rely on them now
		 that the insn has been moved.  */
	      remove_note (tem, note);
	      break;

	    case REG_LABEL_OPERAND:
	    case REG_LABEL_TARGET:
	      /* Keep the label reference count up to date.  */
	      if (LABEL_P (XEXP (note, 0)))
		LABEL_NUSES (XEXP (note, 0)) ++;
	      break;

	    default:
	      break;
	    }
	}
    }

  NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);

  /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
     last insn in that SEQUENCE to point to us.  Similarly for the first
     insn in the following insn if it is a SEQUENCE.  */

  if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn))
      && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
    NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
			XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
      = seq_insn;

  if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn))
      && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
    PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;

  /* If there used to be a BARRIER, put it back.  */
  if (had_barrier)
    emit_barrier_after (seq_insn);

  gcc_assert (i == length + 1);

  return seq_insn;
}
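
/* A hedged sketch (editor's illustration, not authoritative): after one
   slot of a branch has been filled, the stream contains a single insn
   whose pattern looks like

       (sequence [(jump_insn ...)	;; the insn needing the slot
		  (insn ...)])		;; the delay-slot filler

   with NEXT_INSN/PREV_INSN threaded through the inner insns as
   described above, so ordinary insn walks still visit every insn.  */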

/* Add INSN to DELAY_LIST and return the head of the new list.  The list must
   be in the order in which the insns are to be executed.  */

static rtx
add_to_delay_list (rtx insn, rtx delay_list)
{
  /* If we have an empty list, just make a new list element.  If
     INSN has its block number recorded, clear it since we may
     be moving the insn to a new block.  */

  if (delay_list == 0)
    {
      clear_hashed_info_for_insn (insn);
      return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
    }

  /* Otherwise this must be an INSN_LIST.  Add INSN to the end of the
     list.  */
  XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));

  return delay_list;
}
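
/* A hedged usage sketch (editor's addition): building a two-entry delay
   list in execution order, with INSN1 executing first:

       rtx list = add_to_delay_list (insn1, NULL_RTX);
       list = add_to_delay_list (insn2, list);

   The recursion above appends at the tail, so the list order matches
   the order in which the slots will execute.  */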
\f
/* Delete INSN from the delay slot of the insn that it is in, which may
   produce an insn with no delay slots.  Return the new insn.  */

static rtx
delete_from_delay_slot (rtx insn)
{
  rtx trial, seq_insn, seq, prev;
  rtx delay_list = 0;
  int i;
  int had_barrier = 0;

  /* We first must find the insn containing the SEQUENCE with INSN in its
     delay slot.  Do this by finding an insn, TRIAL, where
     PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL.  */

  for (trial = insn;
       PREV_INSN (NEXT_INSN (trial)) == trial;
       trial = NEXT_INSN (trial))
    ;

  seq_insn = PREV_INSN (NEXT_INSN (trial));
  seq = PATTERN (seq_insn);

  if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
    had_barrier = 1;

  /* Create a delay list consisting of all the insns other than the one
     we are deleting (unless we were the only one).  */
  if (XVECLEN (seq, 0) > 2)
    for (i = 1; i < XVECLEN (seq, 0); i++)
      if (XVECEXP (seq, 0, i) != insn)
	delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);

  /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
     list, and rebuild the delay list if non-empty.  */
  prev = PREV_INSN (seq_insn);
  trial = XVECEXP (seq, 0, 0);
  delete_related_insns (seq_insn);
  add_insn_after (trial, prev, NULL);

  /* If there was a barrier after the old SEQUENCE, re-emit it.  */
  if (had_barrier)
    emit_barrier_after (trial);

  /* If there are any delay insns, re-emit them.  Otherwise clear the
     annul flag.  */
  if (delay_list)
    trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
  else if (INSN_P (trial))
    INSN_ANNULLED_BRANCH_P (trial) = 0;

  INSN_FROM_TARGET_P (insn) = 0;

  /* Show we need to fill this insn again.  */
  obstack_ptr_grow (&unfilled_slots_obstack, trial);

  return trial;
}
\f
/* Delete INSN, a JUMP_INSN.  If it is a conditional jump, we must track down
   the insn that sets CC0 for it and delete it too.  */

static void
delete_scheduled_jump (rtx insn)
{
  /* Delete the insn that sets cc0 for us.  On machines without cc0, we could
     delete the insn that sets the condition code, but it is hard to find it.
     Since this case is rare anyway, don't bother trying; there would likely
     be other insns that became dead anyway, which we wouldn't know to
     delete.  */

#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, insn))
    {
      rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);

      /* If a reg-note was found, it points to an insn to set CC0.  This
	 insn is in the delay list of some other insn.  So delete it from
	 the delay list it was in.  */
      if (note)
	{
	  if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
	      && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
	    delete_from_delay_slot (XEXP (note, 0));
	}
      else
	{
	  /* The insn setting CC0 is our previous insn, but it may be in
	     a delay slot.  It will be the last insn in the delay slot, if
	     it is.  */
	  rtx trial = previous_insn (insn);
	  if (NOTE_P (trial))
	    trial = prev_nonnote_insn (trial);
	  if (sets_cc0_p (PATTERN (trial)) != 1
	      || FIND_REG_INC_NOTE (trial, NULL_RTX))
	    return;
	  if (PREV_INSN (NEXT_INSN (trial)) == trial)
	    delete_related_insns (trial);
	  else
	    delete_from_delay_slot (trial);
	}
    }
#endif

  delete_related_insns (insn);
}
\f
/* Counters for delay-slot filling.  */

#define NUM_REORG_FUNCTIONS 2
#define MAX_DELAY_HISTOGRAM 3
#define MAX_REORG_PASSES 2

static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];

static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];

static int reorg_pass_number;

static void
note_delay_statistics (int slots_filled, int index)
{
  num_insns_needing_delays[index][reorg_pass_number]++;
  if (slots_filled > MAX_DELAY_HISTOGRAM)
    slots_filled = MAX_DELAY_HISTOGRAM;
  num_filled_delays[index][slots_filled][reorg_pass_number]++;
}
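
/* For example (editor's note): with MAX_DELAY_HISTOGRAM of 3, an insn
   that had five slots filled is recorded in the 3-slot bucket; the
   histogram saturates instead of growing per target.  */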
\f
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)

/* Optimize the following cases:

   1.  When a conditional branch skips over only one instruction,
   use an annulling branch and put that insn in the delay slot.
   Use either a branch that annuls when the condition is true or
   invert the test with a branch that annuls when the condition is
   false.  This saves insns, since otherwise we must copy an insn
   from the L1 target.

	(orig)		 (skip)		(otherwise)
	Bcc.n L1	Bcc',a L1	Bcc,a L1'
	insn		insn		insn2
      L1:	      L1:	      L1:
	insn2		insn2		insn2
	insn3		insn3	      L1':
					insn3

   2.  When a conditional branch skips over only one instruction,
   and after that, it unconditionally branches somewhere else,
   perform a similar optimization.  This saves executing the
   second branch in the case where the inverted condition is true.

	Bcc.n L1	Bcc',a L2
	insn		insn
      L1:	      L1:
	Bra L2		Bra L2

   INSN is a JUMP_INSN.

   This should be expanded to skip over N insns, where N is the number
   of delay slots required.  */

static rtx
optimize_skip (rtx insn)
{
  rtx trial = next_nonnote_insn (insn);
  rtx next_trial = next_active_insn (trial);
  rtx delay_list = 0;
  int flags;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  if (trial == 0
      || !NONJUMP_INSN_P (trial)
      || GET_CODE (PATTERN (trial)) == SEQUENCE
      || recog_memoized (trial) < 0
      || (! eligible_for_annul_false (insn, 0, trial, flags)
	  && ! eligible_for_annul_true (insn, 0, trial, flags))
      || can_throw_internal (trial))
    return 0;

  /* There are two cases where we are just executing one insn (we assume
     here that a branch requires only one insn; this should be generalized
     at some point):  Where the branch goes around a single insn or where
     we have one insn followed by a branch to the same label we branch to.
     In both of these cases, inverting the jump and annulling the delay
     slot give the same effect in fewer insns.  */
  if ((next_trial == next_active_insn (JUMP_LABEL (insn))
       && ! (next_trial == 0 && crtl->epilogue_delay_list != 0))
      || (next_trial != 0
	  && JUMP_P (next_trial)
	  && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
	  && (simplejump_p (next_trial)
	      || GET_CODE (PATTERN (next_trial)) == RETURN)))
    {
      if (eligible_for_annul_false (insn, 0, trial, flags))
	{
	  if (invert_jump (insn, JUMP_LABEL (insn), 1))
	    INSN_FROM_TARGET_P (trial) = 1;
	  else if (! eligible_for_annul_true (insn, 0, trial, flags))
	    return 0;
	}

      delay_list = add_to_delay_list (trial, NULL_RTX);
      next_trial = next_active_insn (trial);
      update_block (trial, trial);
      delete_related_insns (trial);

      /* Also, if we are targeting an unconditional
	 branch, thread our jump to the target of that branch.  Don't
	 change this into a RETURN here, because it may not accept what
	 we have in the delay slot.  We'll fix this up later.  */
      if (next_trial && JUMP_P (next_trial)
	  && (simplejump_p (next_trial)
	      || GET_CODE (PATTERN (next_trial)) == RETURN))
	{
	  rtx target_label = JUMP_LABEL (next_trial);
	  if (target_label == 0)
	    target_label = find_end_label ();

	  if (target_label)
	    {
	      /* Recompute the flags based on TARGET_LABEL since threading
		 the jump to TARGET_LABEL may change the direction of the
		 jump (which may change the circumstances in which the
		 delay slot is nullified).  */
	      flags = get_jump_flags (insn, target_label);
	      if (eligible_for_annul_true (insn, 0, trial, flags))
		reorg_redirect_jump (insn, target_label);
	    }
	}

      INSN_ANNULLED_BRANCH_P (insn) = 1;
    }

  return delay_list;
}
#endif
\f
/* Encode and return branch direction and prediction information for
   INSN assuming it will jump to LABEL.

   Unconditional branches return no direction information and
   are predicted as very likely taken.  */

static int
get_jump_flags (rtx insn, rtx label)
{
  int flags;

  /* get_jump_flags can be passed any insn with delay slots, these may
     be INSNs, CALL_INSNs, or JUMP_INSNs.  Only JUMP_INSNs have branch
     direction information, and only if they are conditional jumps.

     If LABEL is zero, then there is no way to determine the branch
     direction.  */
  if (JUMP_P (insn)
      && (condjump_p (insn) || condjump_in_parallel_p (insn))
      && INSN_UID (insn) <= max_uid
      && label != 0
      && INSN_UID (label) <= max_uid)
    flags
      = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
	? ATTR_FLAG_forward : ATTR_FLAG_backward;
  /* No valid direction information.  */
  else
    flags = 0;

  /* If INSN is a conditional branch, call mostly_true_jump to determine
     the branch prediction.

     Unconditional branches are predicted as very likely taken.  */
  if (JUMP_P (insn)
      && (condjump_p (insn) || condjump_in_parallel_p (insn)))
    {
      int prediction;

      prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
      switch (prediction)
	{
	case 2:
	  flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
	  break;
	case 1:
	  flags |= ATTR_FLAG_likely;
	  break;
	case 0:
	  flags |= ATTR_FLAG_unlikely;
	  break;
	case -1:
	  flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);

  return flags;
}

/* Return 1 if INSN is a destination that will be branched to rarely (the
   return point of a function); return 2 if INSN will be branched to very
   rarely (a call to a function that doesn't return).  Otherwise,
   return 0.  */

static int
rare_destination (rtx insn)
{
  int jump_count = 0;
  rtx next;

  for (; insn; insn = next)
    {
      if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
	insn = XVECEXP (PATTERN (insn), 0, 0);

      next = NEXT_INSN (insn);

      switch (GET_CODE (insn))
	{
	case CODE_LABEL:
	  return 0;
	case BARRIER:
	  /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN.  We
	     don't scan past JUMP_INSNs, so any barrier we find here must
	     have been after a CALL_INSN and hence mean the call doesn't
	     return.  */
	  return 2;
	case JUMP_INSN:
	  if (GET_CODE (PATTERN (insn)) == RETURN)
	    return 1;
	  else if (simplejump_p (insn)
		   && jump_count++ < 10)
	    next = JUMP_LABEL (insn);
	  else
	    return 0;

	default:
	  break;
	}
    }

  /* If we got here it means we hit the end of the function.  So this
     is an unlikely destination.  */

  return 1;
}

/* Return truth value of the statement that this branch
   is mostly taken.  If we think that the branch is extremely likely
   to be taken, we return 2.  If the branch is slightly more likely to be
   taken, return 1.  If the branch is slightly less likely to be taken,
   return 0; if the branch is highly unlikely to be taken, return -1.

   CONDITION, if nonzero, is the condition that JUMP_INSN is testing.  */

static int
mostly_true_jump (rtx jump_insn, rtx condition)
{
  rtx target_label = JUMP_LABEL (jump_insn);
  rtx note;
  int rare_dest, rare_fallthrough;

  /* If branch probabilities are available, then use that number since it
     always gives a correct answer.  */
  note = find_reg_note (jump_insn, REG_BR_PROB, 0);
  if (note)
    {
      int prob = INTVAL (XEXP (note, 0));

      if (prob >= REG_BR_PROB_BASE * 9 / 10)
	return 2;
      else if (prob >= REG_BR_PROB_BASE / 2)
	return 1;
      else if (prob >= REG_BR_PROB_BASE / 10)
	return 0;
      else
	return -1;
    }

  /* Look at the relative rarities of the fallthrough and destination.  If
     they differ, we can predict the branch that way.  */
  rare_dest = rare_destination (target_label);
  rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));

  switch (rare_fallthrough - rare_dest)
    {
    case -2:
      return -1;
    case -1:
      return 0;
    case 0:
      break;
    case 1:
      return 1;
    case 2:
      return 2;
    }

  /* If we couldn't figure out what this jump was, assume it won't be
     taken.  This should be rare.  */
  if (condition == 0)
    return 0;

  /* Predict that backward branches are usually taken and forward branches
     usually are not.  If we don't know whether this is forward or
     backward, assume the branch will be taken, since most are.  */
  return (target_label == 0 || INSN_UID (jump_insn) > max_uid
	  || INSN_UID (target_label) > max_uid
	  || (uid_to_ruid[INSN_UID (jump_insn)]
	      > uid_to_ruid[INSN_UID (target_label)]));
}
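
/* A hedged arithmetic check (editor's note): with a REG_BR_PROB_BASE of
   10000, a REG_BR_PROB note of 9000 or more yields 2, 5000..8999 yields
   1, 1000..4999 yields 0, and anything below 1000 yields -1.  */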

/* Return the condition under which INSN will branch to TARGET.  If TARGET
   is zero, return the condition under which INSN will return.  If INSN is
   an unconditional branch, return const_true_rtx.  If INSN isn't a simple
   type of jump, or it doesn't go to TARGET, return 0.  */

static rtx
get_branch_condition (rtx insn, rtx target)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (condjump_in_parallel_p (insn))
    pat = XVECEXP (pat, 0, 0);

  if (GET_CODE (pat) == RETURN)
    return target == 0 ? const_true_rtx : 0;

  else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;

  src = SET_SRC (pat);
  if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
    return const_true_rtx;

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
	       || (GET_CODE (XEXP (src, 1)) == LABEL_REF
		   && XEXP (XEXP (src, 1), 0) == target))
	   && XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
	       || (GET_CODE (XEXP (src, 2)) == LABEL_REF
		   && XEXP (XEXP (src, 2), 0) == target))
	   && XEXP (src, 1) == pc_rtx)
    {
      enum rtx_code rev;
      rev = reversed_comparison_code (XEXP (src, 0), insn);
      if (rev != UNKNOWN)
	return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)),
			       XEXP (XEXP (src, 0), 0),
			       XEXP (XEXP (src, 0), 1));
    }

  return 0;
}
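
/* A hedged illustration (editor's addition) of the standard RTL shapes
   recognized above, simplified:

       (set (pc) (label_ref L))				    ;; unconditional
       (set (pc) (if_then_else (COND) (label_ref L) (pc)))  ;; taken if COND
       (set (pc) (if_then_else (COND) (pc) (label_ref L)))  ;; taken if !COND

   For the last shape the condition is reversed before being returned,
   so callers always see the condition under which the branch is
   taken.  */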

/* Return nonzero if CONDITION is more strict than the condition of
   INSN, i.e., if INSN will always branch if CONDITION is true.  */

static int
condition_dominates_p (rtx condition, rtx insn)
{
  rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
  enum rtx_code code = GET_CODE (condition);
  enum rtx_code other_code;

  if (rtx_equal_p (condition, other_condition)
      || other_condition == const_true_rtx)
    return 1;

  else if (condition == const_true_rtx || other_condition == 0)
    return 0;

  other_code = GET_CODE (other_condition);
  if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
      || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
      || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
    return 0;

  return comparison_dominates_p (code, other_code);
}
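
/* For example (editor's note): (eq x y) dominates (le x y) on the same
   operands, since comparison_dominates_p (EQ, LE) is nonzero (EQ implies
   LE), so a branch on LE is known taken whenever a branch on EQ is.  */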

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns already in the delay slot of JUMP.  */

static int
redirect_with_delay_slots_safe_p (rtx jump, rtx newlabel, rtx seq)
{
  int flags, i;
  rtx pat = PATTERN (seq);

  /* Make sure all the delay slots of this jump would still
     be valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  flags = get_jump_flags (jump, newlabel);
  for (i = 1; i < XVECLEN (pat, 0); i++)
    if (! (
#ifdef ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
	   ? eligible_for_annul_false (jump, i - 1,
				       XVECEXP (pat, 0, i), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
	   ? eligible_for_annul_true (jump, i - 1,
				      XVECEXP (pat, 0, i), flags) :
#endif
	   eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
      break;

  return (i == XVECLEN (pat, 0));
}

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns we wish to place in the delay slot of JUMP.  */

static int
redirect_with_delay_list_safe_p (rtx jump, rtx newlabel, rtx delay_list)
{
  int flags, i;
  rtx li;

  /* Make sure all the insns in DELAY_LIST would still be
     valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  flags = get_jump_flags (jump, newlabel);
  for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
    if (! (
#ifdef ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (XEXP (li, 0)))
	   ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
	   ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
#endif
	   eligible_for_delay (jump, i, XEXP (li, 0), flags)))
      break;

  return (li == NULL);
}

/* DELAY_LIST is a list of insns that have already been placed into delay
   slots.  See if all of them have the same annulling status as ANNUL_TRUE_P.
   If not, return 0; otherwise return 1.  */

static int
check_annul_list_true_false (int annul_true_p, rtx delay_list)
{
  rtx temp;

  if (delay_list)
    {
      for (temp = delay_list; temp; temp = XEXP (temp, 1))
	{
	  rtx trial = XEXP (temp, 0);

	  if ((annul_true_p && INSN_FROM_TARGET_P (trial))
	      || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
	    return 0;
	}
    }

  return 1;
}
\f
/* INSN branches to an insn whose pattern SEQ is a SEQUENCE.  Given that
   the condition tested by INSN is CONDITION and the resources shown in
   OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
   from SEQ's delay list, in addition to whatever insns it may execute
   (in DELAY_LIST).  SETS and NEEDED denote resources already set and
   needed while searching for delay slot insns.  Return the concatenated
   delay list if possible, otherwise, return 0.

   SLOTS_TO_FILL is the total number of slots required by INSN, and
   PSLOTS_FILLED points to the number filled so far (also the number of
   insns in DELAY_LIST).  It is updated with the number that have been
   filled from the SEQUENCE, if any.

   PANNUL_P points to a nonzero value if we already know that we need
   to annul INSN.  If this routine determines that annulling is needed,
   it may set that value nonzero.

   PNEW_THREAD points to a location that is to receive the place at which
   execution should continue.  */

static rtx
steal_delay_list_from_target (rtx insn, rtx condition, rtx seq,
			      rtx delay_list, struct resources *sets,
			      struct resources *needed,
			      struct resources *other_needed,
			      int slots_to_fill, int *pslots_filled,
			      int *pannul_p, rtx *pnew_thread)
{
  rtx temp;
  int slots_remaining = slots_to_fill - *pslots_filled;
  int total_slots_filled = *pslots_filled;
  rtx new_delay_list = 0;
  int must_annul = *pannul_p;
  int used_annul = 0;
  int i;
  struct resources cc_set;

  /* We can't do anything if there are more delay slots in SEQ than we
     can handle, or if we don't know that it will be a taken branch.
     We know that it will be a taken branch if it is either an unconditional
     branch or a conditional branch with a stricter branch condition.

     Also, exit if the branch has more than one set, since then it is computing
     other results that can't be ignored, e.g. the HPPA mov&branch instruction.
     ??? It may be possible to move other sets into INSN in addition to
     moving the instructions in the delay slots.

     We cannot steal the delay list if one of the instructions in the
     current delay_list modifies the condition codes and the jump in the
     sequence is a conditional jump.  We cannot do this because we cannot
     change the direction of the jump: the condition codes will affect
     the direction of the jump in the sequence.  */

  CLEAR_RESOURCE (&cc_set);
  for (temp = delay_list; temp; temp = XEXP (temp, 1))
    {
      rtx trial = XEXP (temp, 0);

      mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
      if (insn_references_resource_p (XVECEXP (seq, 0, 0), &cc_set, false))
	return delay_list;
    }

  if (XVECLEN (seq, 0) - 1 > slots_remaining
      || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
      || ! single_set (XVECEXP (seq, 0, 0)))
    return delay_list;

#ifdef MD_CAN_REDIRECT_BRANCH
  /* On some targets, branches with delay slots can have a limited
     displacement.  Give the back end a chance to tell us we can't do
     this.  */
  if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0)))
    return delay_list;
#endif

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);
      int flags;

      if (insn_references_resource_p (trial, sets, false)
	  || insn_sets_resource_p (trial, needed, false)
	  || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
	  /* If TRIAL sets CC0, we can't copy it, so we can't steal this
	     delay list.  */
	  || find_reg_note (trial, REG_CC_USER, NULL_RTX)
#endif
	  /* If TRIAL is from the fallthrough code of an annulled branch insn
	     in SEQ, we cannot use it.  */
	  || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
	      && ! INSN_FROM_TARGET_P (trial)))
	return delay_list;

      /* If this insn was already done (usually in a previous delay slot),
	 pretend we put it in our delay slot.  */
      if (redundant_insn (trial, insn, new_delay_list))
	continue;

      /* We will end up re-vectoring this branch, so compute flags
	 based on jumping to the new label.  */
      flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));

      if (! must_annul
	  && ((condition == const_true_rtx
	       || (! insn_sets_resource_p (trial, other_needed, false)
		   && ! may_trap_or_fault_p (PATTERN (trial)))))
	  ? eligible_for_delay (insn, total_slots_filled, trial, flags)
	  : (must_annul || (delay_list == NULL && new_delay_list == NULL))
	    && (must_annul = 1,
		check_annul_list_true_false (0, delay_list)
		&& check_annul_list_true_false (0, new_delay_list)
		&& eligible_for_annul_false (insn, total_slots_filled,
					     trial, flags)))
	{
	  if (must_annul)
	    used_annul = 1;
	  temp = copy_rtx (trial);
	  INSN_FROM_TARGET_P (temp) = 1;
	  new_delay_list = add_to_delay_list (temp, new_delay_list);
	  total_slots_filled++;

	  if (--slots_remaining == 0)
	    break;
	}
      else
	return delay_list;
    }

  /* Show the place to which we will be branching.  */
  *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));

  /* Add any new insns to the delay list and update the count of the
     number of slots filled.  */
  *pslots_filled = total_slots_filled;
  if (used_annul)
    *pannul_p = 1;

  if (delay_list == 0)
    return new_delay_list;

  for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
    delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);

  return delay_list;
}
\f
/* Similar to steal_delay_list_from_target except that SEQ is on the
   fallthrough path of INSN.  Here we only do something if the delay insn
   of SEQ is an unconditional branch.  In that case we steal its delay slot
   for INSN since unconditional branches are much easier to fill.  */

static rtx
steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq,
				   rtx delay_list, struct resources *sets,
				   struct resources *needed,
				   struct resources *other_needed,
				   int slots_to_fill, int *pslots_filled,
				   int *pannul_p)
{
  int i;
  int flags;
  int must_annul = *pannul_p;
  int used_annul = 0;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  /* We can't do anything if SEQ's delay insn isn't an
     unconditional branch.  */

  if (! simplejump_p (XVECEXP (seq, 0, 0))
      && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);

      /* If TRIAL sets CC0, stealing it will move it too far from the use
	 of CC0.  */
      if (insn_references_resource_p (trial, sets, false)
	  || insn_sets_resource_p (trial, needed, false)
	  || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
	  || sets_cc0_p (PATTERN (trial))
#endif
	  )

	break;

      /* If this insn was already done, we don't need it.  */
      if (redundant_insn (trial, insn, delay_list))
	{
	  delete_from_delay_slot (trial);
	  continue;
	}

      if (! must_annul
	  && ((condition == const_true_rtx
	       || (! insn_sets_resource_p (trial, other_needed, false)
		   && ! may_trap_or_fault_p (PATTERN (trial)))))
	  ? eligible_for_delay (insn, *pslots_filled, trial, flags)
	  : (must_annul || delay_list == NULL) && (must_annul = 1,
	     check_annul_list_true_false (1, delay_list)
	     && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
	{
	  if (must_annul)
	    used_annul = 1;
	  delete_from_delay_slot (trial);
	  delay_list = add_to_delay_list (trial, delay_list);

	  if (++(*pslots_filled) == slots_to_fill)
	    break;
	}
      else
	break;
    }

  if (used_annul)
    *pannul_p = 1;
  return delay_list;
}
\f
/* Try merging insns starting at THREAD which match exactly the insns in
   INSN's delay list.

   If all insns were matched and the insn was previously annulling, the
   annul bit will be cleared.

   For each insn that is merged, if the branch is or will be non-annulling,
   we delete the merged insn.  */

static void
try_merge_delay_insns (rtx insn, rtx thread)
{
  rtx trial, next_trial;
  rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
  int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
  int slot_number = 1;
  int num_slots = XVECLEN (PATTERN (insn), 0);
  rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
  struct resources set, needed;
  rtx merged_insns = 0;
  int i;
  int flags;

  flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));

  CLEAR_RESOURCE (&needed);
  CLEAR_RESOURCE (&set);

  /* If this is not an annulling branch, take into account anything needed in
     INSN's delay slot.  This prevents two increments from being incorrectly
     folded into one.  If we are annulling, this would be the correct
     thing to do.  (The alternative, looking at things set in NEXT_TO_MATCH
     will essentially disable this optimization.  This method is somewhat of
     a kludge, but I don't see a better way.)  */
  if (! annul_p)
    for (i = 1 ; i < num_slots; i++)
      if (XVECEXP (PATTERN (insn), 0, i))
	mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed,
				   true);

  for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
    {
      rtx pat = PATTERN (trial);
      rtx oldtrial = trial;

      next_trial = next_nonnote_insn (trial);

      /* TRIAL must be a CALL_INSN or INSN.  Skip USE and CLOBBER.  */
      if (NONJUMP_INSN_P (trial)
	  && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
	continue;

      if (GET_CODE (next_to_match) == GET_CODE (trial)
#ifdef HAVE_cc0
	  /* We can't share an insn that sets cc0.  */
	  && ! sets_cc0_p (pat)
#endif
	  && ! insn_references_resource_p (trial, &set, true)
	  && ! insn_sets_resource_p (trial, &set, true)
	  && ! insn_sets_resource_p (trial, &needed, true)
	  && (trial = try_split (pat, trial, 0)) != 0
	  /* Update next_trial, in case try_split succeeded.  */
	  && (next_trial = next_nonnote_insn (trial))
	  /* Likewise THREAD.  */
	  && (thread = oldtrial == thread ? trial : thread)
	  && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
	  /* Have to test this condition if annul condition is different
	     from (and less restrictive than) non-annulling one.  */
	  && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
	{

	  if (! annul_p)
	    {
	      update_block (trial, thread);
	      if (trial == thread)
		thread = next_active_insn (thread);

	      delete_related_insns (trial);
	      INSN_FROM_TARGET_P (next_to_match) = 0;
	    }
	  else
	    merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns);

	  if (++slot_number == num_slots)
	    break;

	  next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
	}

      mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
      mark_referenced_resources (trial, &needed, true);
    }

  /* See if we stopped on a filled insn.  If we did, try to see if its
     delay slots match.  */
  if (slot_number != num_slots
      && trial && NONJUMP_INSN_P (trial)
      && GET_CODE (PATTERN (trial)) == SEQUENCE
      && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
    {
      rtx pat = PATTERN (trial);
      rtx filled_insn = XVECEXP (pat, 0, 0);

      /* Account for resources set/needed by the filled insn.  */
      mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
      mark_referenced_resources (filled_insn, &needed, true);

      for (i = 1; i < XVECLEN (pat, 0); i++)
	{
	  rtx dtrial = XVECEXP (pat, 0, i);

	  if (! insn_references_resource_p (dtrial, &set, true)
	      && ! insn_sets_resource_p (dtrial, &set, true)
	      && ! insn_sets_resource_p (dtrial, &needed, true)
#ifdef HAVE_cc0
	      && ! sets_cc0_p (PATTERN (dtrial))
#endif
	      && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
	      && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
	    {
	      if (! annul_p)
		{
		  rtx new_rtx;

		  update_block (dtrial, thread);
		  new_rtx = delete_from_delay_slot (dtrial);
		  if (INSN_DELETED_P (thread))
		    thread = new_rtx;
		  INSN_FROM_TARGET_P (next_to_match) = 0;
		}
	      else
		merged_insns = gen_rtx_INSN_LIST (SImode, dtrial,
						  merged_insns);

	      if (++slot_number == num_slots)
		break;

	      next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
	    }
	  else
	    {
	      /* Keep track of the set/referenced resources for the delay
		 slots of any trial insns we encounter.  */
	      mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL);
	      mark_referenced_resources (dtrial, &needed, true);
	    }
	}
    }

  /* If all insns in the delay slot have been matched and we were previously
     annulling the branch, we need not do so any more.  In that case delete
     all the merged insns.  Also clear the INSN_FROM_TARGET_P bit of each
     insn in the delay list so that we know that it isn't only being used
     at the target.  */
  if (slot_number == num_slots && annul_p)
    {
      for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
	{
	  if (GET_MODE (merged_insns) == SImode)
	    {
	      rtx new_rtx;

	      update_block (XEXP (merged_insns, 0), thread);
	      new_rtx = delete_from_delay_slot (XEXP (merged_insns, 0));
	      if (INSN_DELETED_P (thread))
		thread = new_rtx;
	    }
	  else
	    {
	      update_block (XEXP (merged_insns, 0), thread);
	      delete_related_insns (XEXP (merged_insns, 0));
	    }
	}

      INSN_ANNULLED_BRANCH_P (delay_insn) = 0;

      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
    }
}
1594 \f
1595 /* See if INSN is redundant with an insn in front of TARGET. Often this
1596 is called when INSN is a candidate for a delay slot of TARGET.
1597 DELAY_LIST are insns that will be placed in delay slots of TARGET in front
1598 of INSN. Often INSN will be redundant with an insn in a delay slot of
1599 some previous insn. This happens when we have a series of branches to the
1600 same label; in that case the first insn at the target might want to go
1601 into each of the delay slots.
1602
1603 If we are not careful, this routine can take up a significant fraction
1604 of the total compilation time (4%), but only wins rarely. Hence we
1605 speed this routine up by making two passes. The first pass goes back
1606 until it hits a label and sees if it finds an insn with an identical
1607 pattern. Only in this (relatively rare) event does it check for
1608 data conflicts.
1609
1610 We do not split insns we encounter. This could cause us not to find a
1611 redundant insn, but the cost of splitting seems greater than the possible
1612 gain in rare cases. */
1613
1614 static rtx
1615 redundant_insn (rtx insn, rtx target, rtx delay_list)
1616 {
1617 rtx target_main = target;
1618 rtx ipat = PATTERN (insn);
1619 rtx trial, pat;
1620 struct resources needed, set;
1621 int i;
1622 unsigned insns_to_search;
1623
1624 /* If INSN has any REG_UNUSED notes, it can't match anything since we
1625 are allowed to not actually assign to such a register. */
1626 if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
1627 return 0;
1628
1629 /* Scan backwards looking for a match. */
1630 for (trial = PREV_INSN (target),
1631 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1632 trial && insns_to_search > 0;
1633 trial = PREV_INSN (trial), --insns_to_search)
1634 {
1635 if (LABEL_P (trial))
1636 return 0;
1637
1638 if (! INSN_P (trial))
1639 continue;
1640
1641 pat = PATTERN (trial);
1642 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1643 continue;
1644
1645 if (GET_CODE (pat) == SEQUENCE)
1646 {
1647 /* Stop for a CALL and its delay slots because it is difficult to
1648 track its resource needs correctly. */
1649 if (CALL_P (XVECEXP (pat, 0, 0)))
1650 return 0;
1651
1652 /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
1653 slots because it is difficult to track its resource needs
1654 correctly. */
1655
1656 #ifdef INSN_SETS_ARE_DELAYED
1657 if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1658 return 0;
1659 #endif
1660
1661 #ifdef INSN_REFERENCES_ARE_DELAYED
1662 if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1663 return 0;
1664 #endif
1665
1666 /* See if any of the insns in the delay slot match, updating
1667 resource requirements as we go. */
1668 for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
1669 if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
1670 && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
1671 && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
1672 break;
1673
1674 /* If we found a match, exit this loop early. */
1675 if (i > 0)
1676 break;
1677 }
1678
1679 else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
1680 && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
1681 break;
1682 }
1683
1684 /* If we didn't find an insn that matches, return 0. */
1685 if (trial == 0)
1686 return 0;
1687
1688 /* See what resources this insn sets and needs. If they overlap, or
1689 if this insn references CC0, it can't be redundant. */
1690
1691 CLEAR_RESOURCE (&needed);
1692 CLEAR_RESOURCE (&set);
1693 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
1694 mark_referenced_resources (insn, &needed, true);
1695
1696 /* If TARGET is a SEQUENCE, get the main insn. */
1697 if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1698 target_main = XVECEXP (PATTERN (target), 0, 0);
1699
1700 if (resource_conflicts_p (&needed, &set)
1701 #ifdef HAVE_cc0
1702 || reg_mentioned_p (cc0_rtx, ipat)
1703 #endif
1704 /* The insn requiring the delay may not set anything needed or set by
1705 INSN. */
1706 || insn_sets_resource_p (target_main, &needed, true)
1707 || insn_sets_resource_p (target_main, &set, true))
1708 return 0;
1709
1710 /* Insns we pass may not set either NEEDED or SET, so merge them for
1711 simpler tests. */
1712 needed.memory |= set.memory;
1713 needed.unch_memory |= set.unch_memory;
1714 IOR_HARD_REG_SET (needed.regs, set.regs);
1715
1716 /* This insn isn't redundant if it conflicts with an insn that either is
1717 or will be in a delay slot of TARGET. */
1718
1719 while (delay_list)
1720 {
1721 if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, true))
1722 return 0;
1723 delay_list = XEXP (delay_list, 1);
1724 }
1725
1726 if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1727 for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
1728 if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed,
1729 true))
1730 return 0;
1731
1732 /* Scan backwards until we reach a label or an insn that uses something
1733 INSN sets or sets something INSN uses or sets. */
1734
1735 for (trial = PREV_INSN (target),
1736 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1737 trial && !LABEL_P (trial) && insns_to_search > 0;
1738 trial = PREV_INSN (trial), --insns_to_search)
1739 {
1740 if (!INSN_P (trial))
1741 continue;
1742
1743 pat = PATTERN (trial);
1744 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1745 continue;
1746
1747 if (GET_CODE (pat) == SEQUENCE)
1748 {
1749 /* If this is a CALL_INSN together with its delay slots, it is hard to track
1750 the resource needs properly, so give up. */
1751 if (CALL_P (XVECEXP (pat, 0, 0)))
1752 return 0;
1753
1754 /* If this is an INSN or JUMP_INSN with delayed effects, it
1755 is hard to track the resource needs properly, so give up. */
1756
1757 #ifdef INSN_SETS_ARE_DELAYED
1758 if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1759 return 0;
1760 #endif
1761
1762 #ifdef INSN_REFERENCES_ARE_DELAYED
1763 if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1764 return 0;
1765 #endif
1766
1767 /* See if any of the insns in the delay slot match, updating
1768 resource requirements as we go. */
1769 for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
1770 {
1771 rtx candidate = XVECEXP (pat, 0, i);
1772
1773 /* If an insn will be annulled if the branch is false, it isn't
1774 considered a possible duplicate insn. */
1775 if (rtx_equal_p (PATTERN (candidate), ipat)
1776 && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
1777 && INSN_FROM_TARGET_P (candidate)))
1778 {
1779 /* Show that this insn will be used in the sequel. */
1780 INSN_FROM_TARGET_P (candidate) = 0;
1781 return candidate;
1782 }
1783
1784 /* Unless this is an annulled insn from the target of a branch,
1785 we must stop if it sets anything needed or set by INSN. */
1786 if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
1787 || ! INSN_FROM_TARGET_P (candidate))
1788 && insn_sets_resource_p (candidate, &needed, true))
1789 return 0;
1790 }
1791
1792 /* If the insn requiring the delay slot conflicts with INSN, we
1793 must stop. */
1794 if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, true))
1795 return 0;
1796 }
1797 else
1798 {
1799 /* See if TRIAL is the same as INSN. */
1800 pat = PATTERN (trial);
1801 if (rtx_equal_p (pat, ipat))
1802 return trial;
1803
1804 /* Can't go any further if TRIAL conflicts with INSN. */
1805 if (insn_sets_resource_p (trial, &needed, true))
1806 return 0;
1807 }
1808 }
1809
1810 return 0;
1811 }
1812 \f
1813 /* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero,
1814 it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
1815 is nonzero, we are allowed to fall into this thread; otherwise, we are
1816 not.
1817
1818 If LABEL is used more than once or we pass a label other than LABEL before
1819 finding an active insn, we do not own this thread. */
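/* A sketch of the failure case (labels hypothetical): for

	b	L1		! branch whose thread starts at L1
	...
   L1:
   L2:	insn A			! L2 is reachable from elsewhere

   we pass the label L2 before reaching an active insn, so we do not
   own the thread and must not remove insns from it.  */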
1820
1821 static int
1822 own_thread_p (rtx thread, rtx label, int allow_fallthrough)
1823 {
1824 rtx active_insn;
1825 rtx insn;
1826
1827 /* We don't own the function end. */
1828 if (thread == 0)
1829 return 0;
1830
1831 /* Get the first active insn, or THREAD, if it is an active insn. */
1832 active_insn = next_active_insn (PREV_INSN (thread));
1833
1834 for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
1835 if (LABEL_P (insn)
1836 && (insn != label || LABEL_NUSES (insn) != 1))
1837 return 0;
1838
1839 if (allow_fallthrough)
1840 return 1;
1841
1842 /* Ensure that we reach a BARRIER before any insn or label. */
1843 for (insn = prev_nonnote_insn (thread);
1844 insn == 0 || !BARRIER_P (insn);
1845 insn = prev_nonnote_insn (insn))
1846 if (insn == 0
1847 || LABEL_P (insn)
1848 || (NONJUMP_INSN_P (insn)
1849 && GET_CODE (PATTERN (insn)) != USE
1850 && GET_CODE (PATTERN (insn)) != CLOBBER))
1851 return 0;
1852
1853 return 1;
1854 }
1855 \f
1856 /* Called when INSN is being moved from a location near the target of a jump.
1857 We leave a marker of the form (use (INSN)) immediately in front
1858 of WHERE for mark_target_live_regs. These markers will be deleted when
1859 reorg finishes.
1860
1861 We used to try to update the live status of registers if WHERE is at
1862 the start of a basic block, but that can't work since we may remove a
1863 BARRIER in relax_delay_slots. */
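/* Schematically, the marker emitted in front of WHERE is an insn whose
   pattern is

	(use (insn I))

   where I is the moved insn itself.  It is not a real use, merely a
   hint for mark_target_live_regs.  (A sketch; the surrounding rtl is
   elided.)  */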
1864
1865 static void
1866 update_block (rtx insn, rtx where)
1867 {
1868 /* Ignore if this was in a delay slot and it came from the target of
1869 a branch. */
1870 if (INSN_FROM_TARGET_P (insn))
1871 return;
1872
1873 emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
1874
1875 /* INSN might be making a value live in a block where it didn't use to
1876 be. So recompute liveness information for this block. */
1877
1878 incr_ticks_for_insn (insn);
1879 }
1880
1881 /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
1882 the basic block containing the jump. */
1883
1884 static int
1885 reorg_redirect_jump (rtx jump, rtx nlabel)
1886 {
1887 incr_ticks_for_insn (jump);
1888 return redirect_jump (jump, nlabel, 1);
1889 }
1890
1891 /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
1892 We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
1893 that reference values used in INSN. If we find one, then we move the
1894 REG_DEAD note to INSN.
1895
1896 This is needed to handle the case where a later insn (after INSN) has a
1897 REG_DEAD note for a register used by INSN, and this later insn subsequently
1898 gets moved before a CODE_LABEL because it is a redundant insn. In this
1899 case, mark_target_live_regs may be confused into thinking the register
1900 is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
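/* Schematically (the insns and register are hypothetical):

	I:  ... use of r4 ...		! insn being moved into the slot
	P:  ... with REG_DEAD (r4)	! between I and D
	D:  insn needing delay slots

   Once I is moved into D's delay slot it follows P, so the REG_DEAD
   note for r4 migrates from P to I.  */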
1901
1902 static void
1903 update_reg_dead_notes (rtx insn, rtx delayed_insn)
1904 {
1905 rtx p, link, next;
1906
1907 for (p = next_nonnote_insn (insn); p != delayed_insn;
1908 p = next_nonnote_insn (p))
1909 for (link = REG_NOTES (p); link; link = next)
1910 {
1911 next = XEXP (link, 1);
1912
1913 if (REG_NOTE_KIND (link) != REG_DEAD
1914 || !REG_P (XEXP (link, 0)))
1915 continue;
1916
1917 if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
1918 {
1919 /* Move the REG_DEAD note from P to INSN. */
1920 remove_note (p, link);
1921 XEXP (link, 1) = REG_NOTES (insn);
1922 REG_NOTES (insn) = link;
1923 }
1924 }
1925 }
1926
1927 /* Called when an insn redundant with START_INSN is deleted. If there
1928 is a REG_DEAD note for the target of START_INSN between START_INSN
1929 and STOP_INSN, then the REG_DEAD note needs to be deleted since the
1930 value no longer dies there.
1931
1932 If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
1933 confused into thinking the register is dead. */
1934
1935 static void
1936 fix_reg_dead_note (rtx start_insn, rtx stop_insn)
1937 {
1938 rtx p, link, next;
1939
1940 for (p = next_nonnote_insn (start_insn); p != stop_insn;
1941 p = next_nonnote_insn (p))
1942 for (link = REG_NOTES (p); link; link = next)
1943 {
1944 next = XEXP (link, 1);
1945
1946 if (REG_NOTE_KIND (link) != REG_DEAD
1947 || !REG_P (XEXP (link, 0)))
1948 continue;
1949
1950 if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
1951 {
1952 remove_note (p, link);
1953 return;
1954 }
1955 }
1956 }
1957
1958 /* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
1959
1960 This handles the case of udivmodXi4 instructions which optimize their
1961 output depending on whether any REG_UNUSED notes are present.
1962 We must make sure that INSN calculates as many results as REDUNDANT_INSN
1963 does. */
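/* E.g. for a (hypothetical) udivmodsi4 producing r1 = x / y and
   r2 = x % y: if INSN carries REG_UNUSED (r2) but REDUNDANT_INSN does
   not, the note is removed so that INSN still computes the remainder
   as well.  */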
1964
1965 static void
1966 update_reg_unused_notes (rtx insn, rtx redundant_insn)
1967 {
1968 rtx link, next;
1969
1970 for (link = REG_NOTES (insn); link; link = next)
1971 {
1972 next = XEXP (link, 1);
1973
1974 if (REG_NOTE_KIND (link) != REG_UNUSED
1975 || !REG_P (XEXP (link, 0)))
1976 continue;
1977
1978 if (! find_regno_note (redundant_insn, REG_UNUSED,
1979 REGNO (XEXP (link, 0))))
1980 remove_note (insn, link);
1981 }
1982 }
1983 \f
1984 /* Return the label before INSN, or put a new label there. */
1985
1986 static rtx
1987 get_label_before (rtx insn)
1988 {
1989 rtx label;
1990
1991 /* Find an existing label at this point
1992 or make a new one if there is none. */
1993 label = prev_nonnote_insn (insn);
1994
1995 if (label == 0 || !LABEL_P (label))
1996 {
1997 rtx prev = PREV_INSN (insn);
1998
1999 label = gen_label_rtx ();
2000 emit_label_after (label, prev);
2001 LABEL_NUSES (label) = 0;
2002 }
2003 return label;
2004 }
2005
2006 /* Scan a function looking for insns that need a delay slot and find insns to
2007 put into the delay slot.
2008
2009 NON_JUMPS_P is nonzero if we are to only try to fill non-jump insns (such
2010 as calls). We do these first since we don't want jump insns (that are
2011 easier to fill) to get the only insns that could be used for non-jump insns.
2012 When it is zero, only try to fill JUMP_INSNs.
2013
2014 When slots are filled in this manner, the insns (including the
2015 delay_insn) are put together in a SEQUENCE rtx. In this fashion,
2016 it is possible to tell whether a delay slot has really been filled
2017 or not. `final' knows how to deal with this, by communicating
2018 through FINAL_SEQUENCE. */
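/* Schematically, a filled call with one delay slot becomes (a sketch,
   details elided):

	(insn (sequence [(call_insn ...)	; the delay insn
			 (insn ...)]))		; fills its slot

   `final' recognizes the SEQUENCE and emits the second insn in the
   delay slot of the call.  */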
2019
2020 static void
2021 fill_simple_delay_slots (int non_jumps_p)
2022 {
2023 rtx insn, pat, trial, next_trial;
2024 int i;
2025 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
2026 struct resources needed, set;
2027 int slots_to_fill, slots_filled;
2028 rtx delay_list;
2029
2030 for (i = 0; i < num_unfilled_slots; i++)
2031 {
2032 int flags;
2033 /* Get the next insn to fill. If it has already had any slots assigned,
2034 we can't do anything with it. Maybe we'll improve this later. */
2035
2036 insn = unfilled_slots_base[i];
2037 if (insn == 0
2038 || INSN_DELETED_P (insn)
2039 || (NONJUMP_INSN_P (insn)
2040 && GET_CODE (PATTERN (insn)) == SEQUENCE)
2041 || (JUMP_P (insn) && non_jumps_p)
2042 || (!JUMP_P (insn) && ! non_jumps_p))
2043 continue;
2044
2045 /* It may have been that this insn used to need delay slots, but
2046 now doesn't; ignore it in that case. This can happen, for example,
2047 on the HP PA RISC, where the number of delay slots depends on
2048 what insns are nearby. */
2049 slots_to_fill = num_delay_slots (insn);
2050
2051 Some machine descriptions have defined instructions to have
2052 delay slots only in certain circumstances which may depend on
2053 nearby insns (which change due to reorg's actions).
2054
2055 For example, the PA port normally has delay slots for unconditional
2056 jumps.
2057
2058 However, the PA port claims such jumps do not have a delay slot
2059 if they are immediate successors of certain CALL_INSNs. This
2060 allows the port to favor filling the delay slot of the call with
2061 the unconditional jump. */
2062 if (slots_to_fill == 0)
2063 continue;
2064
2065 /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
2066 says how many. After initialization, first try optimizing
2067
2068 call _foo call _foo
2069 nop add %o7,.-L1,%o7
2070 b,a L1
2071 nop
2072
2073 If this case applies, the delay slot of the call is filled with
2074 the unconditional jump. This is done first to avoid having the
2075 delay slot of the call filled in the backward scan. Also, since
2076 the unconditional jump is likely to also have a delay slot, that
2077 insn must exist when it is subsequently scanned.
2078
2079 This is tried on each insn with delay slots as some machines
2080 have insns which perform calls, but are not represented as
2081 CALL_INSNs. */
2082
2083 slots_filled = 0;
2084 delay_list = 0;
2085
2086 if (JUMP_P (insn))
2087 flags = get_jump_flags (insn, JUMP_LABEL (insn));
2088 else
2089 flags = get_jump_flags (insn, NULL_RTX);
2090
2091 if ((trial = next_active_insn (insn))
2092 && JUMP_P (trial)
2093 && simplejump_p (trial)
2094 && eligible_for_delay (insn, slots_filled, trial, flags)
2095 && no_labels_between_p (insn, trial)
2096 && ! can_throw_internal (trial))
2097 {
2098 rtx *tmp;
2099 slots_filled++;
2100 delay_list = add_to_delay_list (trial, delay_list);
2101
2102 /* TRIAL may have had its delay slot filled, then unfilled. When
2103 the delay slot is unfilled, TRIAL is placed back on the unfilled
2104 slots obstack. Unfortunately, it is placed on the end of the
2105 obstack, not in its original location. Therefore, we must search
2106 from entry i + 1 to the end of the unfilled slots obstack to
2107 try and find TRIAL. */
2108 tmp = &unfilled_slots_base[i + 1];
2109 while (*tmp != trial && tmp != unfilled_slots_next)
2110 tmp++;
2111
2112 /* Remove the unconditional jump from consideration for delay slot
2113 filling and unthread it. */
2114 if (*tmp == trial)
2115 *tmp = 0;
2116 {
2117 rtx next = NEXT_INSN (trial);
2118 rtx prev = PREV_INSN (trial);
2119 if (prev)
2120 NEXT_INSN (prev) = next;
2121 if (next)
2122 PREV_INSN (next) = prev;
2123 }
2124 }
2125
2126 /* Now, scan backwards from the insn to search for a potential
2127 delay-slot candidate. Stop searching when a label or jump is hit.
2128
2129 For each candidate, if it is to go into the delay slot (moved
2130 forward in execution sequence), it must not need or set any resources
2131 that were set by later insns and must not set any resources that
2132 are needed for those insns.
2133
2134 The delay slot insn itself sets resources unless it is a call
2135 (in which case the called routine, not the insn itself, is doing
2136 the setting). */
2137
2138 if (slots_filled < slots_to_fill)
2139 {
2140 CLEAR_RESOURCE (&needed);
2141 CLEAR_RESOURCE (&set);
2142 mark_set_resources (insn, &set, 0, MARK_SRC_DEST);
2143 mark_referenced_resources (insn, &needed, false);
2144
2145 for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
2146 trial = next_trial)
2147 {
2148 next_trial = prev_nonnote_insn (trial);
2149
2150 /* This must be an INSN or CALL_INSN. */
2151 pat = PATTERN (trial);
2152
2153 /* USE and CLOBBER at this level are just for flow; ignore them. */
2154 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2155 continue;
2156
2157 /* Check for resource conflict first, to avoid unnecessary
2158 splitting. */
2159 if (! insn_references_resource_p (trial, &set, true)
2160 && ! insn_sets_resource_p (trial, &set, true)
2161 && ! insn_sets_resource_p (trial, &needed, true)
2162 #ifdef HAVE_cc0
2163 /* Can't separate set of cc0 from its use. */
2164 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2165 #endif
2166 && ! can_throw_internal (trial))
2167 {
2168 trial = try_split (pat, trial, 1);
2169 next_trial = prev_nonnote_insn (trial);
2170 if (eligible_for_delay (insn, slots_filled, trial, flags))
2171 {
2172 /* In this case, we are searching backward, so if we
2173 find insns to put on the delay list, we want
2174 to put them at the head, rather than the
2175 tail, of the list. */
2176
2177 update_reg_dead_notes (trial, insn);
2178 delay_list = gen_rtx_INSN_LIST (VOIDmode,
2179 trial, delay_list);
2180 update_block (trial, trial);
2181 delete_related_insns (trial);
2182 if (slots_to_fill == ++slots_filled)
2183 break;
2184 continue;
2185 }
2186 }
2187
2188 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2189 mark_referenced_resources (trial, &needed, true);
2190 }
2191 }
2192
2193 /* If all needed slots haven't been filled, we come here. */
2194
2195 /* Try to optimize case of jumping around a single insn. */
2196 #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
2197 if (slots_filled != slots_to_fill
2198 && delay_list == 0
2199 && JUMP_P (insn)
2200 && (condjump_p (insn) || condjump_in_parallel_p (insn)))
2201 {
2202 delay_list = optimize_skip (insn);
2203 if (delay_list)
2204 slots_filled += 1;
2205 }
2206 #endif
2207
2208 /* Try to get insns from beyond the insn needing the delay slot.
2209 These insns can neither set nor reference resources set in insns being
2210 skipped, cannot set resources in the insn being skipped, and, if this
2211 is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
2212 call might not return).
2213
2214 There used to be code which continued past the target label if
2215 we saw all uses of the target label. This code did not work,
2216 because it failed to account for some instructions which were
2217 both annulled and marked as from the target. This can happen as a
2218 result of optimize_skip. Since this code was redundant with
2219 fill_eager_delay_slots anyways, it was just deleted. */
2220
2221 if (slots_filled != slots_to_fill
2222 /* If this instruction could throw an exception which is
2223 caught in the same function, then it's not safe to fill
2224 the delay slot with an instruction from beyond this
2225 point. For example, consider:
2226
2227 int i = 2;
2228
2229 try {
2230 f();
2231 i = 3;
2232 } catch (...) {}
2233
2234 return i;
2235
2236 Even though `i' is a local variable, we must be sure not
2237 to put `i = 3' in the delay slot if `f' might throw an
2238 exception.
2239
2240 Presumably, we should also check to see if we could get
2241 back to this function via `setjmp'. */
2242 && ! can_throw_internal (insn)
2243 && (!JUMP_P (insn)
2244 || ((condjump_p (insn) || condjump_in_parallel_p (insn))
2245 && ! simplejump_p (insn)
2246 && JUMP_LABEL (insn) != 0)))
2247 {
2248 /* Invariant: if INSN is a JUMP_INSN, TARGET is the insn's jump
2249 label; otherwise, zero. */
2250 rtx target = 0;
2251 int maybe_never = 0;
2252 rtx pat, trial_delay;
2253
2254 CLEAR_RESOURCE (&needed);
2255 CLEAR_RESOURCE (&set);
2256
2257 if (CALL_P (insn))
2258 {
2259 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2260 mark_referenced_resources (insn, &needed, true);
2261 maybe_never = 1;
2262 }
2263 else
2264 {
2265 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2266 mark_referenced_resources (insn, &needed, true);
2267 if (JUMP_P (insn))
2268 target = JUMP_LABEL (insn);
2269 }
2270
2271 if (target == 0)
2272 for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
2273 {
2274 next_trial = next_nonnote_insn (trial);
2275
2276 if (LABEL_P (trial)
2277 || BARRIER_P (trial))
2278 break;
2279
2280 /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
2281 pat = PATTERN (trial);
2282
2283 /* Stand-alone USE and CLOBBER are just for flow. */
2284 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2285 continue;
2286
2287 /* If this already has filled delay slots, get the insn needing
2288 the delay slots. */
2289 if (GET_CODE (pat) == SEQUENCE)
2290 trial_delay = XVECEXP (pat, 0, 0);
2291 else
2292 trial_delay = trial;
2293
2294 /* Stop our search when seeing an unconditional jump. */
2295 if (JUMP_P (trial_delay))
2296 break;
2297
2298 /* See if we have a resource problem before we try to
2299 split. */
2300 if (GET_CODE (pat) != SEQUENCE
2301 && ! insn_references_resource_p (trial, &set, true)
2302 && ! insn_sets_resource_p (trial, &set, true)
2303 && ! insn_sets_resource_p (trial, &needed, true)
2304 #ifdef HAVE_cc0
2305 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2306 #endif
2307 && ! (maybe_never && may_trap_or_fault_p (pat))
2308 && (trial = try_split (pat, trial, 0))
2309 && eligible_for_delay (insn, slots_filled, trial, flags)
2310 && ! can_throw_internal (trial))
2311 {
2312 next_trial = next_nonnote_insn (trial);
2313 delay_list = add_to_delay_list (trial, delay_list);
2314
2315 #ifdef HAVE_cc0
2316 if (reg_mentioned_p (cc0_rtx, pat))
2317 link_cc0_insns (trial);
2318 #endif
2319
2320 delete_related_insns (trial);
2321 if (slots_to_fill == ++slots_filled)
2322 break;
2323 continue;
2324 }
2325
2326 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2327 mark_referenced_resources (trial, &needed, true);
2328
2329 /* Ensure we don't put insns between the setting of cc and the
2330 comparison by moving a setting of cc into an earlier delay
2331 slot since these insns could clobber the condition code. */
2332 set.cc = 1;
2333
2334 /* If this is a call or jump, we might not get here. */
2335 if (CALL_P (trial_delay)
2336 || JUMP_P (trial_delay))
2337 maybe_never = 1;
2338 }
2339
2340 /* If there are slots left to fill and our search was stopped by an
2341 unconditional branch, try the insn at the branch target. We can
2342 redirect the branch if it works.
2343
2344 Don't do this if the insn at the branch target is a branch. */
2345 if (slots_to_fill != slots_filled
2346 && trial
2347 && JUMP_P (trial)
2348 && simplejump_p (trial)
2349 && (target == 0 || JUMP_LABEL (trial) == target)
2350 && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
2351 && ! (NONJUMP_INSN_P (next_trial)
2352 && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
2353 && !JUMP_P (next_trial)
2354 && ! insn_references_resource_p (next_trial, &set, true)
2355 && ! insn_sets_resource_p (next_trial, &set, true)
2356 && ! insn_sets_resource_p (next_trial, &needed, true)
2357 #ifdef HAVE_cc0
2358 && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
2359 #endif
2360 && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
2361 && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
2362 && eligible_for_delay (insn, slots_filled, next_trial, flags)
2363 && ! can_throw_internal (trial))
2364 {
2365 /* See comment in relax_delay_slots about necessity of using
2366 next_real_insn here. */
2367 rtx new_label = next_real_insn (next_trial);
2368
2369 if (new_label != 0)
2370 new_label = get_label_before (new_label);
2371 else
2372 new_label = find_end_label ();
2373
2374 if (new_label)
2375 {
2376 delay_list
2377 = add_to_delay_list (copy_rtx (next_trial), delay_list);
2378 slots_filled++;
2379 reorg_redirect_jump (trial, new_label);
2380
2381 /* If we merged because we both jumped to the same place,
2382 redirect the original insn also. */
2383 if (target)
2384 reorg_redirect_jump (insn, new_label);
2385 }
2386 }
2387 }
2388
2389 /* If this is an unconditional jump, then try to get insns from the
2390 target of the jump. */
2391 if (JUMP_P (insn)
2392 && simplejump_p (insn)
2393 && slots_filled != slots_to_fill)
2394 delay_list
2395 = fill_slots_from_thread (insn, const_true_rtx,
2396 next_active_insn (JUMP_LABEL (insn)),
2397 NULL, 1, 1,
2398 own_thread_p (JUMP_LABEL (insn),
2399 JUMP_LABEL (insn), 0),
2400 slots_to_fill, &slots_filled,
2401 delay_list);
2402
2403 if (delay_list)
2404 unfilled_slots_base[i]
2405 = emit_delay_sequence (insn, delay_list, slots_filled);
2406
2407 if (slots_to_fill == slots_filled)
2408 unfilled_slots_base[i] = 0;
2409
2410 note_delay_statistics (slots_filled, 0);
2411 }
2412
2413 #ifdef DELAY_SLOTS_FOR_EPILOGUE
2414 /* See if the epilogue needs any delay slots. Try to fill them if so.
2415 The only thing we can do is scan backwards from the end of the
2416 function. If we did this in a previous pass, it is incorrect to do it
2417 again. */
2418 if (crtl->epilogue_delay_list)
2419 return;
2420
2421 slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
2422 if (slots_to_fill == 0)
2423 return;
2424
2425 slots_filled = 0;
2426 CLEAR_RESOURCE (&set);
2427
2428 /* The frame pointer and stack pointer are needed at the beginning of
2429 the epilogue, so instructions setting them cannot be put in the
2430 epilogue delay slot. However, everything else needed at function
2431 end is safe, so we don't want to use end_of_function_needs here. */
2432 CLEAR_RESOURCE (&needed);
2433 if (frame_pointer_needed)
2434 {
2435 SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM);
2436 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2437 SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM);
2438 #endif
2439 if (! EXIT_IGNORE_STACK
2440 || current_function_sp_is_unchanging)
2441 SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
2442 }
2443 else
2444 SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
2445
2446 #ifdef EPILOGUE_USES
2447 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2448 {
2449 if (EPILOGUE_USES (i))
2450 SET_HARD_REG_BIT (needed.regs, i);
2451 }
2452 #endif
2453
2454 for (trial = get_last_insn (); ! stop_search_p (trial, 1);
2455 trial = PREV_INSN (trial))
2456 {
2457 if (NOTE_P (trial))
2458 continue;
2459 pat = PATTERN (trial);
2460 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2461 continue;
2462
2463 if (! insn_references_resource_p (trial, &set, true)
2464 && ! insn_sets_resource_p (trial, &needed, true)
2465 && ! insn_sets_resource_p (trial, &set, true)
2466 #ifdef HAVE_cc0
2467 /* Don't want to mess with cc0 here. */
2468 && ! reg_mentioned_p (cc0_rtx, pat)
2469 #endif
2470 && ! can_throw_internal (trial))
2471 {
2472 trial = try_split (pat, trial, 1);
2473 if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
2474 {
2475 /* Here as well we are searching backward, so put the
2476 insns we find on the head of the list. */
2477
2478 crtl->epilogue_delay_list
2479 = gen_rtx_INSN_LIST (VOIDmode, trial,
2480 crtl->epilogue_delay_list);
2481 mark_end_of_function_resources (trial, true);
2482 update_block (trial, trial);
2483 delete_related_insns (trial);
2484
2485 /* Clear deleted bit so final.c will output the insn. */
2486 INSN_DELETED_P (trial) = 0;
2487
2488 if (slots_to_fill == ++slots_filled)
2489 break;
2490 continue;
2491 }
2492 }
2493
2494 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2495 mark_referenced_resources (trial, &needed, true);
2496 }
2497
2498 note_delay_statistics (slots_filled, 0);
2499 #endif
2500 }
2501 \f
2502 /* Follow any unconditional jump at LABEL;
2503 return the ultimate label reached by any such chain of jumps.
2504 Return null if the chain ultimately leads to a return instruction.
2505 If LABEL is not followed by a jump, return LABEL.
2506 If the chain loops or we can't find the end, return LABEL,
2507 since that tells the caller to avoid changing the insn. */
2508
2509 static rtx
2510 follow_jumps (rtx label)
2511 {
2512 rtx insn;
2513 rtx next;
2514 rtx value = label;
2515 int depth;
2516
2517 for (depth = 0;
2518 (depth < 10
2519 && (insn = next_active_insn (value)) != 0
2520 && JUMP_P (insn)
2521 && ((JUMP_LABEL (insn) != 0 && any_uncondjump_p (insn)
2522 && onlyjump_p (insn))
2523 || GET_CODE (PATTERN (insn)) == RETURN)
2524 && (next = NEXT_INSN (insn))
2525 && BARRIER_P (next));
2526 depth++)
2527 {
2528 rtx tem;
2529
2530 /* If we have found a cycle, make the insn jump to itself. */
2531 if (JUMP_LABEL (insn) == label)
2532 return label;
2533
2534 tem = next_active_insn (JUMP_LABEL (insn));
2535 if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC
2536 || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
2537 break;
2538
2539 value = JUMP_LABEL (insn);
2540 }
2541 if (depth == 10)
2542 return label;
2543 return value;
2544 }
2545
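/* For example, with the chain (labels hypothetical)

   L1:	b	L2
	...
   L2:	b	L3
	...
   L3:	insn A

   follow_jumps (L1) returns L3.  A cycle, or a chain deeper than the
   depth limit above, returns the original label so the caller leaves
   the insn alone.  */
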
2546 /* Try to find insns to place in delay slots.
2547
2548 INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
2549 or is an unconditional branch if CONDITION is const_true_rtx.
2550 *PSLOTS_FILLED is updated with the number of slots that we have filled.
2551
2552 THREAD is a flow of control: either the insns to be executed if the
2553 branch is true or those executed if it is false; THREAD_IF_TRUE says which.
2554
2555 OPPOSITE_THREAD is the thread in the opposite direction. It is used
2556 to see if any potential delay slot insns set things needed there.
2557
2558 LIKELY is nonzero if it is extremely likely that the branch will be
2559 taken and THREAD_IF_TRUE is set. This is used for the branch at the
2560 end of a loop back up to the top.
2561
2562 OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the
2563 thread. I.e., it is the fallthrough code of our jump or the target of the
2564 jump when we are the only jump going there.
2565
2566 If OWN_THREAD is false, it must be the "true" thread of a jump. In that
2567 case, we can only take insns from the head of the thread for our delay
2568 slot. We then adjust the jump to point after the insns we have taken. */
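/* Schematically, for a conditional branch (everything hypothetical):

	beq	L1		! INSN
	insn F			! false (fallthrough) thread
	...
   L1:	insn T			! true thread

   Filling from the target passes `insn T' as THREAD with
   THREAD_IF_TRUE nonzero and the fallthrough code as OPPOSITE_THREAD;
   filling from the fallthrough passes `insn F' with THREAD_IF_TRUE
   zero.  */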
2569
2570 static rtx
2571 fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
2572 rtx opposite_thread, int likely, int thread_if_true,
2573 int own_thread, int slots_to_fill,
2574 int *pslots_filled, rtx delay_list)
2575 {
2576 rtx new_thread;
2577 struct resources opposite_needed, set, needed;
2578 rtx trial;
2579 int lose = 0;
2580 int must_annul = 0;
2581 int flags;
2582
2583 /* Validate our arguments. */
2584 gcc_assert (condition != const_true_rtx || thread_if_true);
2585 gcc_assert (own_thread || thread_if_true);
2586
2587 flags = get_jump_flags (insn, JUMP_LABEL (insn));
2588
2589 /* If our thread is the end of the subroutine, we can't get any delay
2590 insns from that. */
2591 if (thread == 0)
2592 return delay_list;
2593
2594 /* If this is an unconditional branch, nothing is needed at the
2595 opposite thread. Otherwise, compute what is needed there. */
2596 if (condition == const_true_rtx)
2597 CLEAR_RESOURCE (&opposite_needed);
2598 else
2599 mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
2600
2601 /* If the insn at THREAD can be split, do it here to avoid having to
2602 update THREAD and NEW_THREAD if it is done in the loop below. Also
2603 initialize NEW_THREAD. */
2604
2605 new_thread = thread = try_split (PATTERN (thread), thread, 0);
2606
2607 /* Scan insns at THREAD. We are looking for an insn that can be removed
2608 from THREAD (it neither sets nor references resources that were set
2609 ahead of it and it doesn't set anything needed by the insns ahead of
2610 it) and that either can be placed in an annulling insn or isn't
2611 needed at OPPOSITE_THREAD. */
2612
2613 CLEAR_RESOURCE (&needed);
2614 CLEAR_RESOURCE (&set);
2615
2616 /* If we do not own this thread, we must stop as soon as we find
2617 something that we can't put in a delay slot, since all we can do
2618 is branch into THREAD at a later point. Therefore, labels stop
2619 the search if this is not the `true' thread. */
2620
2621 for (trial = thread;
2622 ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
2623 trial = next_nonnote_insn (trial))
2624 {
2625 rtx pat, old_trial;
2626
2627 /* If we have passed a label, we no longer own this thread. */
2628 if (LABEL_P (trial))
2629 {
2630 own_thread = 0;
2631 continue;
2632 }
2633
2634 pat = PATTERN (trial);
2635 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2636 continue;
2637
2638 /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
2639 don't separate or copy insns that set and use CC0. */
2640 if (! insn_references_resource_p (trial, &set, true)
2641 && ! insn_sets_resource_p (trial, &set, true)
2642 && ! insn_sets_resource_p (trial, &needed, true)
2643 #ifdef HAVE_cc0
2644 && ! (reg_mentioned_p (cc0_rtx, pat)
2645 && (! own_thread || ! sets_cc0_p (pat)))
2646 #endif
2647 && ! can_throw_internal (trial))
2648 {
2649 rtx prior_insn;
2650
2651 /* If TRIAL is redundant with some insn before INSN, we don't
2652 actually need to add it to the delay list; we can merely pretend
2653 we did. */
2654 if ((prior_insn = redundant_insn (trial, insn, delay_list)))
2655 {
2656 fix_reg_dead_note (prior_insn, insn);
2657 if (own_thread)
2658 {
2659 update_block (trial, thread);
2660 if (trial == thread)
2661 {
2662 thread = next_active_insn (thread);
2663 if (new_thread == trial)
2664 new_thread = thread;
2665 }
2666
2667 delete_related_insns (trial);
2668 }
2669 else
2670 {
2671 update_reg_unused_notes (prior_insn, trial);
2672 new_thread = next_active_insn (trial);
2673 }
2674
2675 continue;
2676 }
2677
2678 /* There are two ways we can win: If TRIAL doesn't set anything
2679 needed at the opposite thread and can't trap, or if it can
2680 go into an annulled delay slot. */
2681 if (!must_annul
2682 && (condition == const_true_rtx
2683 || (! insn_sets_resource_p (trial, &opposite_needed, true)
2684 && ! may_trap_or_fault_p (pat))))
2685 {
2686 old_trial = trial;
2687 trial = try_split (pat, trial, 0);
2688 if (new_thread == old_trial)
2689 new_thread = trial;
2690 if (thread == old_trial)
2691 thread = trial;
2692 pat = PATTERN (trial);
2693 if (eligible_for_delay (insn, *pslots_filled, trial, flags))
2694 goto winner;
2695 }
2696 else if (0
2697 #ifdef ANNUL_IFTRUE_SLOTS
2698 || ! thread_if_true
2699 #endif
2700 #ifdef ANNUL_IFFALSE_SLOTS
2701 || thread_if_true
2702 #endif
2703 )
2704 {
2705 old_trial = trial;
2706 trial = try_split (pat, trial, 0);
2707 if (new_thread == old_trial)
2708 new_thread = trial;
2709 if (thread == old_trial)
2710 thread = trial;
2711 pat = PATTERN (trial);
2712 if ((must_annul || delay_list == NULL) && (thread_if_true
2713 ? check_annul_list_true_false (0, delay_list)
2714 && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
2715 : check_annul_list_true_false (1, delay_list)
2716 && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
2717 {
2718 rtx temp;
2719
2720 must_annul = 1;
2721 winner:
2722
2723 #ifdef HAVE_cc0
2724 if (reg_mentioned_p (cc0_rtx, pat))
2725 link_cc0_insns (trial);
2726 #endif
2727
2728 /* If we own this thread, delete the insn. If this is the
2729 destination of a branch, show that the basic block's status
2730 may have been updated. In any case, mark the new
2731 starting point of this thread. */
2732 if (own_thread)
2733 {
2734 rtx note;
2735
2736 update_block (trial, thread);
2737 if (trial == thread)
2738 {
2739 thread = next_active_insn (thread);
2740 if (new_thread == trial)
2741 new_thread = thread;
2742 }
2743
2744 /* We are moving this insn, not deleting it. We must
2745 temporarily increment the use count on any referenced
2746 label lest it be deleted by delete_related_insns. */
2747 for (note = REG_NOTES (trial);
2748 note != NULL_RTX;
2749 note = XEXP (note, 1))
2750 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2751 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2752 {
2753 /* REG_LABEL_OPERAND could be
2754 NOTE_INSN_DELETED_LABEL too. */
2755 if (LABEL_P (XEXP (note, 0)))
2756 LABEL_NUSES (XEXP (note, 0))++;
2757 else
2758 gcc_assert (REG_NOTE_KIND (note)
2759 == REG_LABEL_OPERAND);
2760 }
2761 if (JUMP_P (trial) && JUMP_LABEL (trial))
2762 LABEL_NUSES (JUMP_LABEL (trial))++;
2763
2764 delete_related_insns (trial);
2765
2766 for (note = REG_NOTES (trial);
2767 note != NULL_RTX;
2768 note = XEXP (note, 1))
2769 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2770 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2771 {
2772 /* REG_LABEL_OPERAND could be
2773 NOTE_INSN_DELETED_LABEL too. */
2774 if (LABEL_P (XEXP (note, 0)))
2775 LABEL_NUSES (XEXP (note, 0))--;
2776 else
2777 gcc_assert (REG_NOTE_KIND (note)
2778 == REG_LABEL_OPERAND);
2779 }
2780 if (JUMP_P (trial) && JUMP_LABEL (trial))
2781 LABEL_NUSES (JUMP_LABEL (trial))--;
2782 }
2783 else
2784 new_thread = next_active_insn (trial);
2785
2786 temp = own_thread ? trial : copy_rtx (trial);
2787 if (thread_if_true)
2788 INSN_FROM_TARGET_P (temp) = 1;
2789
2790 delay_list = add_to_delay_list (temp, delay_list);
2791
2792 if (slots_to_fill == ++(*pslots_filled))
2793 {
2794 /* Even though we have filled all the slots, we
2795 may be branching to a location that has a
2796 redundant insn. Skip any if so. */
2797 while (new_thread && ! own_thread
2798 && ! insn_sets_resource_p (new_thread, &set, true)
2799 && ! insn_sets_resource_p (new_thread, &needed,
2800 true)
2801 && ! insn_references_resource_p (new_thread,
2802 &set, true)
2803 && (prior_insn
2804 = redundant_insn (new_thread, insn,
2805 delay_list)))
2806 {
2807 /* We know we do not own the thread, so no need
2808 to call update_block and delete_insn. */
2809 fix_reg_dead_note (prior_insn, insn);
2810 update_reg_unused_notes (prior_insn, new_thread);
2811 new_thread = next_active_insn (new_thread);
2812 }
2813 break;
2814 }
2815
2816 continue;
2817 }
2818 }
2819 }
2820
2821 /* This insn can't go into a delay slot. */
2822 lose = 1;
2823 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2824 mark_referenced_resources (trial, &needed, true);
2825
2826 /* Ensure we don't put insns between the setting of cc and the comparison
2827 by moving a setting of cc into an earlier delay slot since these insns
2828 could clobber the condition code. */
2829 set.cc = 1;
2830
2831 /* If this insn is a register-register copy and the next insn has
2832 a use of our destination, change it to use our source. That way,
2833 it will become a candidate for our delay slot the next time
2834 through this loop. This case occurs commonly in loops that
2835 scan a list.
2836
2837 We could check for more complex cases than those tested below,
2838 but it doesn't seem worth it. It might also be a good idea to try
2839 to swap the two insns. That might do better.
2840
2841 We can't do this if the next insn modifies our destination, because
2842 that would make the replacement into the insn invalid. We also can't
2843 do this if it modifies our source, because it might be an earlyclobber
2844 operand. This latter test also prevents updating the contents of
2845 a PRE_INC. We also can't do this if there's overlap of source and
2846 destination. Overlap may happen for larger-than-register-size modes. */
2847
2848 if (NONJUMP_INSN_P (trial) && GET_CODE (pat) == SET
2849 && REG_P (SET_SRC (pat))
2850 && REG_P (SET_DEST (pat))
2851 && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat)))
2852 {
2853 rtx next = next_nonnote_insn (trial);
2854
2855 if (next && NONJUMP_INSN_P (next)
2856 && GET_CODE (PATTERN (next)) != USE
2857 && ! reg_set_p (SET_DEST (pat), next)
2858 && ! reg_set_p (SET_SRC (pat), next)
2859 && reg_referenced_p (SET_DEST (pat), PATTERN (next))
2860 && ! modified_in_p (SET_DEST (pat), next))
2861 validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
2862 }
2863 }
2864
2865 /* If we stopped on a branch insn that has delay slots, see if we can
2866 steal some of the insns in those slots. */
2867 if (trial && NONJUMP_INSN_P (trial)
2868 && GET_CODE (PATTERN (trial)) == SEQUENCE
2869 && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
2870 {
2871 /* If this is the `true' thread, we will want to follow the jump,
2872 so we can only do this if we have taken everything up to here. */
2873 if (thread_if_true && trial == new_thread)
2874 {
2875 delay_list
2876 = steal_delay_list_from_target (insn, condition, PATTERN (trial),
2877 delay_list, &set, &needed,
2878 &opposite_needed, slots_to_fill,
2879 pslots_filled, &must_annul,
2880 &new_thread);
2881 /* If we owned the thread and are told that it branched
2882 elsewhere, make sure we own the thread at the new location. */
2883 if (own_thread && trial != new_thread)
2884 own_thread = own_thread_p (new_thread, new_thread, 0);
2885 }
2886 else if (! thread_if_true)
2887 delay_list
2888 = steal_delay_list_from_fallthrough (insn, condition,
2889 PATTERN (trial),
2890 delay_list, &set, &needed,
2891 &opposite_needed, slots_to_fill,
2892 pslots_filled, &must_annul);
2893 }
2894
2895 /* If we haven't found anything for this delay slot and it is very
2896 likely that the branch will be taken, see if the insn at our target
2897 increments or decrements a register with an increment that does not
2898 depend on the destination register. If so, try to place the opposite
2899 arithmetic insn after the jump insn and put the arithmetic insn in the
2900 delay slot. If we can't do this, return. */
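  /* A sketch of the transformation (hypothetical register and
     constant): if the branch target begins with

	add	r1,r1,4

     that insn can be placed in the delay slot, where it executes on
     both paths; a compensating

	sub	r1,r1,4

     is then emitted immediately after the branch, so the fall-through
     path is unchanged and only the (likely) taken path wins.  */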
2901 if (delay_list == 0 && likely && new_thread
2902 && NONJUMP_INSN_P (new_thread)
2903 && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
2904 && asm_noperands (PATTERN (new_thread)) < 0)
2905 {
2906 rtx pat = PATTERN (new_thread);
2907 rtx dest;
2908 rtx src;
2909
2910 trial = new_thread;
2911 pat = PATTERN (trial);
2912
2913 if (!NONJUMP_INSN_P (trial)
2914 || GET_CODE (pat) != SET
2915 || ! eligible_for_delay (insn, 0, trial, flags)
2916 || can_throw_internal (trial))
2917 return 0;
2918
2919 dest = SET_DEST (pat), src = SET_SRC (pat);
2920 if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
2921 && rtx_equal_p (XEXP (src, 0), dest)
2922 && (!FLOAT_MODE_P (GET_MODE (src))
2923 || flag_unsafe_math_optimizations)
2924 && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
2925 && ! side_effects_p (pat))
2926 {
2927 rtx other = XEXP (src, 1);
2928 rtx new_arith;
2929 rtx ninsn;
2930
2931 /* If this is a constant adjustment, use the same code with
2932 the negated constant. Otherwise, reverse the sense of the
2933 arithmetic. */
2934 if (CONST_INT_P (other))
2935 new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
2936 negate_rtx (GET_MODE (src), other));
2937 else
2938 new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
2939 GET_MODE (src), dest, other);
2940
2941 ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith),
2942 insn);
2943
2944 if (recog_memoized (ninsn) < 0
2945 || (extract_insn (ninsn), ! constrain_operands (1)))
2946 {
2947 delete_related_insns (ninsn);
2948 return 0;
2949 }
2950
2951 if (own_thread)
2952 {
2953 update_block (trial, thread);
2954 if (trial == thread)
2955 {
2956 thread = next_active_insn (thread);
2957 if (new_thread == trial)
2958 new_thread = thread;
2959 }
2960 delete_related_insns (trial);
2961 }
2962 else
2963 new_thread = next_active_insn (trial);
2964
2965 ninsn = own_thread ? trial : copy_rtx (trial);
2966 if (thread_if_true)
2967 INSN_FROM_TARGET_P (ninsn) = 1;
2968
2969 delay_list = add_to_delay_list (ninsn, NULL_RTX);
2970 (*pslots_filled)++;
2971 }
2972 }
2973
2974 if (delay_list && must_annul)
2975 INSN_ANNULLED_BRANCH_P (insn) = 1;
2976
2977 /* If we are to branch into the middle of this thread, find an appropriate
2978 label or make a new one if none, and redirect INSN to it. If we hit the
2979 end of the function, use the end-of-function label. */
2980 if (new_thread != thread)
2981 {
2982 rtx label;
2983
2984 gcc_assert (thread_if_true);
2985
2986 if (new_thread && JUMP_P (new_thread)
2987 && (simplejump_p (new_thread)
2988 || GET_CODE (PATTERN (new_thread)) == RETURN)
2989 && redirect_with_delay_list_safe_p (insn,
2990 JUMP_LABEL (new_thread),
2991 delay_list))
2992 new_thread = follow_jumps (JUMP_LABEL (new_thread));
2993
2994 if (new_thread == 0)
2995 label = find_end_label ();
2996 else if (LABEL_P (new_thread))
2997 label = new_thread;
2998 else
2999 label = get_label_before (new_thread);
3000
3001 if (label)
3002 reorg_redirect_jump (insn, label);
3003 }
3004
3005 return delay_list;
3006 }
3007 \f
3008 /* Make another attempt to find insns to place in delay slots.
3009
3010 We previously looked for insns located in front of the delay insn
3011 and, for non-jump delay insns, located behind the delay insn.
3012
3013 Here we try only to schedule jump insns, moving insns from either
3014 the target or the following insns into the delay slot. If annulling is
3015 supported, we are likely to be able to do this. Otherwise, we can do
3016 this only if safe. */
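/* E.g. for a conditional branch predicted taken, an insn hoisted from
   the branch target can fill the slot; if the port supports annulment,
   the insn is nullified on the fall-through path, so the hoist needs
   no further safety proof there.  (A sketch of the idea; availability
   of annulment depends on the port.)  */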
3017
3018 static void
3019 fill_eager_delay_slots (void)
3020 {
3021 rtx insn;
3022 int i;
3023 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
3024
3025 for (i = 0; i < num_unfilled_slots; i++)
3026 {
3027 rtx condition;
3028 rtx target_label, insn_at_target, fallthrough_insn;
3029 rtx delay_list = 0;
3030 int own_target;
3031 int own_fallthrough;
3032 int prediction, slots_to_fill, slots_filled;
3033
3034 insn = unfilled_slots_base[i];
3035 if (insn == 0
3036 || INSN_DELETED_P (insn)
3037 || !JUMP_P (insn)
3038 || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
3039 continue;
3040
3041 slots_to_fill = num_delay_slots (insn);
3042 /* Some machine descriptions have defined instructions to have
3043 delay slots only in certain circumstances which may depend on
3044 nearby insns (which change due to reorg's actions).
3045
3046 For example, the PA port normally has delay slots for unconditional
3047 jumps.
3048
3049 However, the PA port claims such jumps do not have a delay slot
3050 if they are immediate successors of certain CALL_INSNs. This
3051 allows the port to favor filling the delay slot of the call with
3052 the unconditional jump. */
3053 if (slots_to_fill == 0)
3054 continue;
3055
3056 slots_filled = 0;
3057 target_label = JUMP_LABEL (insn);
3058 condition = get_branch_condition (insn, target_label);
3059
3060 if (condition == 0)
3061 continue;
3062
3063 /* Get the next active fallthrough and target insns and see if we own
3064 them. Then see whether the branch is likely true. We don't need
3065 to do a lot of this for unconditional branches. */
3066
3067 insn_at_target = next_active_insn (target_label);
3068 own_target = own_thread_p (target_label, target_label, 0);
3069
3070 if (condition == const_true_rtx)
3071 {
3072 own_fallthrough = 0;
3073 fallthrough_insn = 0;
3074 prediction = 2;
3075 }
3076 else
3077 {
3078 fallthrough_insn = next_active_insn (insn);
3079 own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
3080 prediction = mostly_true_jump (insn, condition);
3081 }
3082
3083 /* If this insn is expected to branch, first try to get insns from our
3084 target, then our fallthrough insns. If it is not expected to branch,
3085 try the other order. */
3086
3087 if (prediction > 0)
3088 {
3089 delay_list
3090 = fill_slots_from_thread (insn, condition, insn_at_target,
3091 fallthrough_insn, prediction == 2, 1,
3092 own_target,
3093 slots_to_fill, &slots_filled, delay_list);
3094
3095 if (delay_list == 0 && own_fallthrough)
3096 {
3097 /* Even though we didn't find anything for delay slots,
3098 we might have found a redundant insn which we deleted
3099 from the thread that was filled. So we have to recompute
3100 the next insn at the target. */
3101 target_label = JUMP_LABEL (insn);
3102 insn_at_target = next_active_insn (target_label);
3103
3104 delay_list
3105 = fill_slots_from_thread (insn, condition, fallthrough_insn,
3106 insn_at_target, 0, 0,
3107 own_fallthrough,
3108 slots_to_fill, &slots_filled,
3109 delay_list);
3110 }
3111 }
3112 else
3113 {
3114 if (own_fallthrough)
3115 delay_list
3116 = fill_slots_from_thread (insn, condition, fallthrough_insn,
3117 insn_at_target, 0, 0,
3118 own_fallthrough,
3119 slots_to_fill, &slots_filled,
3120 delay_list);
3121
3122 if (delay_list == 0)
3123 delay_list
3124 = fill_slots_from_thread (insn, condition, insn_at_target,
3125 next_active_insn (insn), 0, 1,
3126 own_target,
3127 slots_to_fill, &slots_filled,
3128 delay_list);
3129 }
3130
3131 if (delay_list)
3132 unfilled_slots_base[i]
3133 = emit_delay_sequence (insn, delay_list, slots_filled);
3134
3135 if (slots_to_fill == slots_filled)
3136 unfilled_slots_base[i] = 0;
3137
3138 note_delay_statistics (slots_filled, 1);
3139 }
3140 }
3141 \f
3142 static void delete_computation (rtx insn);
3143
3144 /* Recursively delete prior insns that compute the value (used only by INSN
3145 which the caller is deleting) stored in the register mentioned by NOTE
3146 which is a REG_DEAD note associated with INSN. */
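/* E.g. (hypothetical registers): when deleting

	I:  (set (reg r3) (plus (reg r2) (const_int 1)))  with REG_DEAD (r2)

   a prior

	P:  (set (reg r2) ...)

   that exists only to feed I can be deleted recursively as well.  */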
3147
3148 static void
3149 delete_prior_computation (rtx note, rtx insn)
3150 {
3151 rtx our_prev;
3152 rtx reg = XEXP (note, 0);
3153
3154 for (our_prev = prev_nonnote_insn (insn);
3155 our_prev && (NONJUMP_INSN_P (our_prev)
3156 || CALL_P (our_prev));
3157 our_prev = prev_nonnote_insn (our_prev))
3158 {
3159 rtx pat = PATTERN (our_prev);
3160
3161 /* If we reach a CALL which is not calling a const function
3162 or the callee pops the arguments, then give up. */
3163 if (CALL_P (our_prev)
3164 && (! RTL_CONST_CALL_P (our_prev)
3165 || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
3166 break;
3167
3168 /* If we reach a SEQUENCE, it is too complex to try to
3169 do anything with it, so give up. We can be run during
3170 and after reorg, so SEQUENCE rtl can legitimately show
3171 up here. */
3172 if (GET_CODE (pat) == SEQUENCE)
3173 break;
3174
3175 if (GET_CODE (pat) == USE
3176 && NONJUMP_INSN_P (XEXP (pat, 0)))
3177 /* reorg creates USEs that look like this. We leave them
3178 alone because reorg needs them for its own purposes. */
3179 break;
3180
3181 if (reg_set_p (reg, pat))
3182 {
3183 if (side_effects_p (pat) && !CALL_P (our_prev))
3184 break;
3185
3186 if (GET_CODE (pat) == PARALLEL)
3187 {
3188 /* If we find a SET of something else, we can't
3189 delete the insn. */
3190
3191 int i;
3192
3193 for (i = 0; i < XVECLEN (pat, 0); i++)
3194 {
3195 rtx part = XVECEXP (pat, 0, i);
3196
3197 if (GET_CODE (part) == SET
3198 && SET_DEST (part) != reg)
3199 break;
3200 }
3201
3202 if (i == XVECLEN (pat, 0))
3203 delete_computation (our_prev);
3204 }
3205 else if (GET_CODE (pat) == SET
3206 && REG_P (SET_DEST (pat)))
3207 {
3208 int dest_regno = REGNO (SET_DEST (pat));
3209 int dest_endregno = END_REGNO (SET_DEST (pat));
3210 int regno = REGNO (reg);
3211 int endregno = END_REGNO (reg);
3212
3213 if (dest_regno >= regno
3214 && dest_endregno <= endregno)
3215 delete_computation (our_prev);
3216
3217 /* We may have a multi-word hard register and some, but not
3218 all, of the words of the register are needed in subsequent
3219 insns. Write REG_UNUSED notes for those parts that were not
3220 needed. */
3221 else if (dest_regno <= regno
3222 && dest_endregno >= endregno)
3223 {
3224 int i;
3225
3226 add_reg_note (our_prev, REG_UNUSED, reg);
3227
3228 for (i = dest_regno; i < dest_endregno; i++)
3229 if (! find_regno_note (our_prev, REG_UNUSED, i))
3230 break;
3231
3232 if (i == dest_endregno)
3233 delete_computation (our_prev);
3234 }
3235 }
3236
3237 break;
3238 }
3239
3240 /* If PAT references the register that dies here, it is an
3241 additional use. Hence any prior SET isn't dead. However, this
3242 insn becomes the new place for the REG_DEAD note. */
3243 if (reg_overlap_mentioned_p (reg, pat))
3244 {
3245 XEXP (note, 1) = REG_NOTES (our_prev);
3246 REG_NOTES (our_prev) = note;
3247 break;
3248 }
3249 }
3250 }
3251
3252 /* Delete INSN and recursively delete insns that compute values used only
3253 by INSN. This uses the REG_DEAD notes computed during flow analysis.
3254 If we are running before flow.c, we need do nothing since flow.c will
3255 delete dead code. We also can't know if the registers being used are
3256 dead or not at this point.
3257
3258 Otherwise, look at all our REG_DEAD notes. If a previous insn does
3259 nothing other than set a register that dies in this insn, we can delete
3260 that insn as well.
3261
3262 On machines with CC0, if CC0 is used in this insn, we may be able to
3263 delete the insn that set it. */
3264
3265 static void
3266 delete_computation (rtx insn)
3267 {
3268 rtx note, next;
3269
3270 #ifdef HAVE_cc0
3271 if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
3272 {
3273 rtx prev = prev_nonnote_insn (insn);
3274 /* We assume that at this stage
3275 CC's are always set explicitly
3276 and always immediately before the jump that
3277 will use them. So if the previous insn
3278 exists to set the CC's, delete it
3279 (unless it performs auto-increments, etc.). */
3280 if (prev && NONJUMP_INSN_P (prev)
3281 && sets_cc0_p (PATTERN (prev)))
3282 {
3283 if (sets_cc0_p (PATTERN (prev)) > 0
3284 && ! side_effects_p (PATTERN (prev)))
3285 delete_computation (prev);
3286 else
3287 /* Otherwise, show that cc0 won't be used. */
3288 add_reg_note (prev, REG_UNUSED, cc0_rtx);
3289 }
3290 }
3291 #endif
3292
3293 for (note = REG_NOTES (insn); note; note = next)
3294 {
3295 next = XEXP (note, 1);
3296
3297 if (REG_NOTE_KIND (note) != REG_DEAD
3298 /* Verify that the REG_NOTE is legitimate. */
3299 || !REG_P (XEXP (note, 0)))
3300 continue;
3301
3302 delete_prior_computation (note, insn);
3303 }
3304
3305 delete_related_insns (insn);
3306 }
3307
3308 /* If all INSN does is set the pc, delete it,
3309 and delete the insn that set the condition codes for it
3310 if that's what the previous thing was. */
3311
3312 static void
3313 delete_jump (rtx insn)
3314 {
3315 rtx set = single_set (insn);
3316
3317 if (set && GET_CODE (SET_DEST (set)) == PC)
3318 delete_computation (insn);
3319 }
3320
3321 \f
3322 /* Once we have tried two ways to fill a delay slot, make a pass over the
3323 code to try to improve the results and to do such things as more jump
3324 threading. */
3325
3326 static void
3327 relax_delay_slots (rtx first)
3328 {
3329 rtx insn, next, pat;
3330 rtx trial, delay_insn, target_label;
3331
3332 /* Look at every JUMP_INSN and see if we can improve it. */
3333 for (insn = first; insn; insn = next)
3334 {
3335 rtx other;
3336
3337 next = next_active_insn (insn);
3338
3339 /* If this is a jump insn, see if it now jumps to a jump, jumps to
3340 the next insn, or jumps to a label that is not the last of a
3341 group of consecutive labels. */
3342 if (JUMP_P (insn)
3343 && (condjump_p (insn) || condjump_in_parallel_p (insn))
3344 && (target_label = JUMP_LABEL (insn)) != 0)
3345 {
3346 target_label = skip_consecutive_labels (follow_jumps (target_label));
3347 if (target_label == 0)
3348 target_label = find_end_label ();
3349
3350 if (target_label && next_active_insn (target_label) == next
3351 && ! condjump_in_parallel_p (insn))
3352 {
3353 delete_jump (insn);
3354 continue;
3355 }
3356
3357 if (target_label && target_label != JUMP_LABEL (insn))
3358 reorg_redirect_jump (insn, target_label);
3359
3360 /* See if this jump conditionally branches around an unconditional
3361 jump. If so, invert this jump and point it to the target of the
3362 second jump. */
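	  /* Schematically (hypothetical labels):

		beq	L1		becomes		bne	L2
		jmp	L2
	     L1:				     L1:

	     i.e. the conditional jump is inverted and retargeted at L2,
	     and the unconditional jump is deleted.  */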
3363 if (next && JUMP_P (next)
3364 && any_condjump_p (insn)
3365 && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
3366 && target_label
3367 && next_active_insn (target_label) == next_active_insn (next)
3368 && no_labels_between_p (insn, next))
3369 {
3370 rtx label = JUMP_LABEL (next);
3371
3372 /* Be careful how we do this to avoid deleting code or
3373 labels that are momentarily dead. See similar optimization
3374 in jump.c.
3375
3376 We also need to ensure we properly handle the case when
3377 invert_jump fails. */
3378
3379 ++LABEL_NUSES (target_label);
3380 if (label)
3381 ++LABEL_NUSES (label);
3382
3383 if (invert_jump (insn, label, 1))
3384 {
3385 delete_related_insns (next);
3386 next = insn;
3387 }
3388
3389 if (label)
3390 --LABEL_NUSES (label);
3391
3392 if (--LABEL_NUSES (target_label) == 0)
3393 delete_related_insns (target_label);
3394
3395 continue;
3396 }
3397 }
3398
3399 /* If this is an unconditional jump and the previous insn is a
3400 conditional jump, try reversing the condition of the previous
3401 insn and swapping our targets. The next pass might be able to
3402 fill the slots.
3403
3404 Don't do this if we expect the conditional branch to be true, because
3405 we would then be making the more common case longer. */
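/* Sketch of the swap (hypothetical code).  Before, with the condition
   expected false, the common path is a not-taken branch followed by a
   jump:

       beq  ..., L1
       jmp  L2
   L1: ...

   After inverting the condition and swapping targets:

       bne  ..., L2
       jmp  L1
   L1: ...

   The common case now takes a single branch to L2, and the next pass
   may be able to fill the slots of the rewritten insns. */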
3406
3407 if (JUMP_P (insn)
3408 && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
3409 && (other = prev_active_insn (insn)) != 0
3410 && any_condjump_p (other)
3411 && no_labels_between_p (other, insn)
3412 && 0 > mostly_true_jump (other,
3413 get_branch_condition (other,
3414 JUMP_LABEL (other))))
3415 {
3416 rtx other_target = JUMP_LABEL (other);
3417 target_label = JUMP_LABEL (insn);
3418
3419 if (invert_jump (other, target_label, 0))
3420 reorg_redirect_jump (insn, other_target);
3421 }
3422
3423 /* Now look only at cases where we have filled a delay slot. */
3424 if (!NONJUMP_INSN_P (insn)
3425 || GET_CODE (PATTERN (insn)) != SEQUENCE)
3426 continue;
3427
3428 pat = PATTERN (insn);
3429 delay_insn = XVECEXP (pat, 0, 0);
3430
3431 /* See if the first insn in the delay slot is redundant with some
3432 previous insn. Remove it from the delay slot if so; then set up
3433 to reprocess this insn. */
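/* Purely illustrative: if the slot holds, say, `add $3, $4' and an
   equivalent add is already known to execute on every path reaching
   this SEQUENCE, the slot insn does nothing new; dropping it and
   reprocessing may find a more useful filler. */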
3434 if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
3435 {
3436 delete_from_delay_slot (XVECEXP (pat, 0, 1));
3437 next = prev_active_insn (next);
3438 continue;
3439 }
3440
3441 /* See if we have a RETURN insn with a filled delay slot followed
3442 by a RETURN insn with an unfilled delay slot. If so, we can delete
3443 the first RETURN (but not its delay insn). This gives the same
3444 effect in fewer instructions.
3445
3446 Only do so if optimizing for size, since this results in slower but
3447 smaller code. */
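/* Shape of the match, illustratively:

       (sequence [(return) ; D])   ; RETURN with delay insn D
       (return)                    ; RETURN with an unfilled slot

   D is re-emitted as an ordinary insn and the first RETURN deleted,
   so execution falls through D into the second RETURN. */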
3448 if (optimize_function_for_size_p (cfun)
3449 && GET_CODE (PATTERN (delay_insn)) == RETURN
3450 && next
3451 && JUMP_P (next)
3452 && GET_CODE (PATTERN (next)) == RETURN)
3453 {
3454 rtx after;
3455 int i;
3456
3457 /* Delete the RETURN and just execute the delay list insns.
3458
3459 We do this by deleting the INSN containing the SEQUENCE, then
3460 re-emitting the insns separately, and then deleting the RETURN.
3461 This allows the count of the jump target to be properly
3462 decremented. */
3463
3464 /* Clear the from target bit, since these insns are no longer
3465 in delay slots. */
3466 for (i = 0; i < XVECLEN (pat, 0); i++)
3467 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3468
3469 trial = PREV_INSN (insn);
3470 delete_related_insns (insn);
3471 gcc_assert (GET_CODE (pat) == SEQUENCE);
3472 after = trial;
3473 for (i = 0; i < XVECLEN (pat, 0); i++)
3474 {
3475 rtx this_insn = XVECEXP (pat, 0, i);
3476 add_insn_after (this_insn, after, NULL);
3477 after = this_insn;
3478 }
3479 delete_scheduled_jump (delay_insn);
3480 continue;
3481 }
3482
3483 /* Now look only at the cases where we have a filled JUMP_INSN. */
3484 if (!JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
3485 || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
3486 || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
3487 continue;
3488
3489 target_label = JUMP_LABEL (delay_insn);
3490
3491 if (target_label)
3492 {
3493 /* If this jump goes to another unconditional jump, thread it, but
3494 don't convert a jump into a RETURN here. */
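/* E.g. (hypothetical labels): if this filled jump targets L1 and the
   first active insn at L1 is just `jmp L2', retarget the filled jump
   to L2, provided redirect_with_delay_slots_safe_p says the insns in
   the delay slots stay valid for the new target. */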
3495 trial = skip_consecutive_labels (follow_jumps (target_label));
3496 if (trial == 0)
3497 trial = find_end_label ();
3498
3499 if (trial && trial != target_label
3500 && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
3501 {
3502 reorg_redirect_jump (delay_insn, trial);
3503 target_label = trial;
3504 }
3505
3506 /* If the first insn at TARGET_LABEL is redundant with a previous
3507 insn, redirect the jump to the following insn and process again. */
3508 trial = next_active_insn (target_label);
3509 if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
3510 && redundant_insn (trial, insn, 0)
3511 && ! can_throw_internal (trial))
3512 {
3513 /* Figure out where to emit the special USE insn so we don't
3514 later incorrectly compute register live/death info. */
3515 rtx tmp = next_active_insn (trial);
3516 if (tmp == 0)
3517 tmp = find_end_label ();
3518
3519 if (tmp)
3520 {
3521 /* Insert the special USE insn and update dataflow info. */
3522 update_block (trial, tmp);
3523
3524 /* Now emit a label before the special USE insn, and
3525 redirect our jump to the new label. */
3526 target_label = get_label_before (PREV_INSN (tmp));
3527 reorg_redirect_jump (delay_insn, target_label);
3528 next = insn;
3529 continue;
3530 }
3531 }
3532
3533 /* Similarly, if it is an unconditional jump with one insn in its
3534 delay list and that insn is redundant, thread the jump. */
3535 if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
3536 && XVECLEN (PATTERN (trial), 0) == 2
3537 && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
3538 && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
3539 || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
3540 && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
3541 {
3542 target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
3543 if (target_label == 0)
3544 target_label = find_end_label ();
3545
3546 if (target_label
3547 && redirect_with_delay_slots_safe_p (delay_insn, target_label,
3548 insn))
3549 {
3550 reorg_redirect_jump (delay_insn, target_label);
3551 next = insn;
3552 continue;
3553 }
3554 }
3555 }
3556
3557 if (! INSN_ANNULLED_BRANCH_P (delay_insn)
3558 && prev_active_insn (target_label) == insn
3559 && ! condjump_in_parallel_p (delay_insn)
3560 #ifdef HAVE_cc0
3561 /* If the last insn in the delay slot sets CC0 for some insn,
3562 various code assumes that it is in a delay slot. We could
3563 put it back where it belonged and delete the register notes,
3564 but it doesn't seem worthwhile in this uncommon case. */
3565 && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
3566 REG_CC_USER, NULL_RTX)
3567 #endif
3568 )
3569 {
3570 rtx after;
3571 int i;
3572
3573 /* All this insn does is execute its delay list and jump to the
3574 following insn. So delete the jump and just execute the delay
3575 list insns.
3576
3577 We do this by deleting the INSN containing the SEQUENCE, then
3578 re-emitting the insns separately, and then deleting the jump.
3579 This allows the count of the jump target to be properly
3580 decremented. */
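/* Illustratively, before:

       (sequence [jmp L1 ; S1 ; S2])
   L1: ...

   and after:

       S1
       S2
   L1: ...

   The jump reached only the immediately following active insn, so
   its delay insns are spliced out inline and the jump is deleted. */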
3581
3582 /* Clear the from target bit, since these insns are no longer
3583 in delay slots. */
3584 for (i = 0; i < XVECLEN (pat, 0); i++)
3585 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3586
3587 trial = PREV_INSN (insn);
3588 delete_related_insns (insn);
3589 gcc_assert (GET_CODE (pat) == SEQUENCE);
3590 after = trial;
3591 for (i = 0; i < XVECLEN (pat, 0); i++)
3592 {
3593 rtx this_insn = XVECEXP (pat, 0, i);
3594 add_insn_after (this_insn, after, NULL);
3595 after = this_insn;
3596 }
3597 delete_scheduled_jump (delay_insn);
3598 continue;
3599 }
3600
3601 /* See if this is an unconditional jump around a single insn which is
3602 identical to the one in its delay slot. In this case, we can just
3603 delete the branch and the insn in its delay slot. */
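/* Illustrative layout of this case:

       (sequence [jmp L1 ; X])   ; filled unconditional jump
       X                         ; identical copy of the slot insn
   L1: ...

   Jumping executes the slotted X and lands at L1; deleting the
   SEQUENCE instead falls through the second X to L1.  Same effect,
   two insns fewer. */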
3604 if (next && NONJUMP_INSN_P (next)
3605 && prev_label (next_active_insn (next)) == target_label
3606 && simplejump_p (insn)
3607 && XVECLEN (pat, 0) == 2
3608 && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
3609 {
3610 delete_related_insns (insn);
3611 continue;
3612 }
3613
3614 /* See if this jump (with its delay slots) conditionally branches
3615 around an unconditional jump (without delay slots). If so, invert
3616 this jump and point it to the target of the second jump. We cannot
3617 do this for annulled jumps, though. Again, don't convert a jump to
3618 a RETURN here. */
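/* Hedged sketch with hypothetical code.  Before:

       (sequence [beq ..., L1 ; S])
       jmp  L2
   L1: ...

   After inversion:

       (sequence [bne ..., L2 ; S])
   L1: ...

   The `jmp L2' is deleted, and the INSN_FROM_TARGET_P bit of each
   slot insn S is flipped below so mark_target_live_regs still knows
   which thread S was taken from. */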
3619 if (! INSN_ANNULLED_BRANCH_P (delay_insn)
3620 && any_condjump_p (delay_insn)
3621 && next && JUMP_P (next)
3622 && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
3623 && next_active_insn (target_label) == next_active_insn (next)
3624 && no_labels_between_p (insn, next))
3625 {
3626 rtx label = JUMP_LABEL (next);
3627 rtx old_label = JUMP_LABEL (delay_insn);
3628
3629 if (label == 0)
3630 label = find_end_label ();
3631
3632 /* find_end_label can generate a new label. Check this first. */
3633 if (label
3634 && no_labels_between_p (insn, next)
3635 && redirect_with_delay_slots_safe_p (delay_insn, label, insn))
3636 {
3637 /* Be careful how we do this to avoid deleting code or labels
3638 that are momentarily dead. See similar optimization in
3639 jump.c */
3640 if (old_label)
3641 ++LABEL_NUSES (old_label);
3642
3643 if (invert_jump (delay_insn, label, 1))
3644 {
3645 int i;
3646
3647 /* Must update the INSN_FROM_TARGET_P bits now that
3648 the branch is reversed, so that mark_target_live_regs
3649 will handle the delay slot insn correctly. */
3650 for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
3651 {
3652 rtx slot = XVECEXP (PATTERN (insn), 0, i);
3653 INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
3654 }
3655
3656 delete_related_insns (next);
3657 next = insn;
3658 }
3659
3660 if (old_label && --LABEL_NUSES (old_label) == 0)
3661 delete_related_insns (old_label);
3662 continue;
3663 }
3664 }
3665
3666 /* If we own the thread opposite the way this insn branches, see if we
3667 can merge its delay slots with following insns. */
3668 if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
3669 && own_thread_p (NEXT_INSN (insn), 0, 1))
3670 try_merge_delay_insns (insn, next);
3671 else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
3672 && own_thread_p (target_label, target_label, 0))
3673 try_merge_delay_insns (insn, next_active_insn (target_label));
3674
3675 /* If we get here, we haven't deleted INSN. But we may have deleted
3676 NEXT, so recompute it. */
3677 next = next_active_insn (insn);
3678 }
3679 }
3680 \f
3681 #ifdef HAVE_return
3682
3683 /* Look for filled jumps to the end of function label. We can try to convert
3684 them into RETURN insns if the insns in the delay slot are valid for the
3685 RETURN as well. */
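/* Hypothetical illustration: given a filled jump

       (sequence [jmp end_of_function_label ; S])

   the jump itself may be turned into a (return); if S is not valid
   in the RETURN's delay slot, S is re-emitted inline and the RETURN
   is queued so the later fill passes can refill its slots. */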
3686
3687 static void
3688 make_return_insns (rtx first)
3689 {
3690 rtx insn, jump_insn, pat;
3691 rtx real_return_label = end_of_function_label;
3692 int slots, i;
3693
3694 #ifdef DELAY_SLOTS_FOR_EPILOGUE
3695 /* If a previous pass filled delay slots in the epilogue, things get a
3696 bit more complicated, as those filler insns would generally (without
3697 data flow analysis) have to be executed after any existing branch
3698 delay slot filler insns. It is also unknown whether such a
3699 transformation would actually be profitable. Note that the existing
3700 code only cares for branches with (some) filled delay slots. */
3701 if (crtl->epilogue_delay_list != NULL)
3702 return;
3703 #endif
3704
3705 /* See if there is a RETURN insn in the function other than the one we
3706 made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change
3707 into a RETURN to jump to it. */
3708 for (insn = first; insn; insn = NEXT_INSN (insn))
3709 if (JUMP_P (insn) && GET_CODE (PATTERN (insn)) == RETURN)
3710 {
3711 real_return_label = get_label_before (insn);
3712 break;
3713 }
3714
3715 /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
3716 was equal to END_OF_FUNCTION_LABEL. */
3717 LABEL_NUSES (real_return_label)++;
3718
3719 /* Clear the list of insns to fill so we can use it. */
3720 obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3721
3722 for (insn = first; insn; insn = NEXT_INSN (insn))
3723 {
3724 int flags;
3725
3726 /* Only look at filled JUMP_INSNs that go to the end of function
3727 label. */
3728 if (!NONJUMP_INSN_P (insn)
3729 || GET_CODE (PATTERN (insn)) != SEQUENCE
3730 || !JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
3731 || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
3732 continue;
3733
3734 pat = PATTERN (insn);
3735 jump_insn = XVECEXP (pat, 0, 0);
3736
3737 /* If we can't make the jump into a RETURN, try to redirect it to the best
3738 RETURN and go on to the next insn. */
3739 if (! reorg_redirect_jump (jump_insn, NULL_RTX))
3740 {
3741 /* Make sure redirecting the jump will not invalidate the delay
3742 slot insns. */
3743 if (redirect_with_delay_slots_safe_p (jump_insn,
3744 real_return_label,
3745 insn))
3746 reorg_redirect_jump (jump_insn, real_return_label);
3747 continue;
3748 }
3749
3750 /* See if this RETURN can accept the insns currently in its delay slot.
3751 It can if it has at least as many slots and the contents of each
3752 are valid. */
3753
3754 flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
3755 slots = num_delay_slots (jump_insn);
3756 if (slots >= XVECLEN (pat, 0) - 1)
3757 {
3758 for (i = 1; i < XVECLEN (pat, 0); i++)
3759 if (! (
3760 #ifdef ANNUL_IFFALSE_SLOTS
3761 (INSN_ANNULLED_BRANCH_P (jump_insn)
3762 && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
3763 ? eligible_for_annul_false (jump_insn, i - 1,
3764 XVECEXP (pat, 0, i), flags) :
3765 #endif
3766 #ifdef ANNUL_IFTRUE_SLOTS
3767 (INSN_ANNULLED_BRANCH_P (jump_insn)
3768 && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
3769 ? eligible_for_annul_true (jump_insn, i - 1,
3770 XVECEXP (pat, 0, i), flags) :
3771 #endif
3772 eligible_for_delay (jump_insn, i - 1,
3773 XVECEXP (pat, 0, i), flags)))
3774 break;
3775 }
3776 else
3777 i = 0;
3778
3779 if (i == XVECLEN (pat, 0))
3780 continue;
3781
3782 /* We have to do something with this insn. If it is an unconditional
3783 RETURN, delete the SEQUENCE and output the individual insns,
3784 followed by the RETURN. Then set things up so we try to find
3785 insns for its delay slots, if it needs some. */
3786 if (GET_CODE (PATTERN (jump_insn)) == RETURN)
3787 {
3788 rtx prev = PREV_INSN (insn);
3789
3790 delete_related_insns (insn);
3791 for (i = 1; i < XVECLEN (pat, 0); i++)
3792 prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);
3793
3794 insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
3795 emit_barrier_after (insn);
3796
3797 if (slots)
3798 obstack_ptr_grow (&unfilled_slots_obstack, insn);
3799 }
3800 else
3801 /* It is probably more efficient to keep this with its current
3802 delay slot as a branch to a RETURN. */
3803 reorg_redirect_jump (jump_insn, real_return_label);
3804 }
3805
3806 /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any
3807 new delay slots we have created. */
3808 if (--LABEL_NUSES (real_return_label) == 0)
3809 delete_related_insns (real_return_label);
3810
3811 fill_simple_delay_slots (1);
3812 fill_simple_delay_slots (0);
3813 }
3814 #endif
3815 \f
3816 /* Try to find insns to place in delay slots. */
3817
3818 void
3819 dbr_schedule (rtx first)
3820 {
3821 rtx insn, next, epilogue_insn = 0;
3822 int i;
3823
3824 /* If the current function has no insns other than the prologue and
3825 epilogue, then do not try to fill any delay slots. */
3826 if (n_basic_blocks == NUM_FIXED_BLOCKS)
3827 return;
3828
3829 /* Find the highest INSN_UID and allocate and initialize our map from
3830 INSN_UIDs to position in code. */
3831 for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
3832 {
3833 if (INSN_UID (insn) > max_uid)
3834 max_uid = INSN_UID (insn);
3835 if (NOTE_P (insn)
3836 && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
3837 epilogue_insn = insn;
3838 }
3839
3840 uid_to_ruid = XNEWVEC (int, max_uid + 1);
3841 for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
3842 uid_to_ruid[INSN_UID (insn)] = i;
3843
3844 /* Initialize the list of insns that need filling. */
3845 if (unfilled_firstobj == 0)
3846 {
3847 gcc_obstack_init (&unfilled_slots_obstack);
3848 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3849 }
3850
3851 for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
3852 {
3853 rtx target;
3854
3855 INSN_ANNULLED_BRANCH_P (insn) = 0;
3856 INSN_FROM_TARGET_P (insn) = 0;
3857
3858 /* Skip vector tables. We can't get attributes for them. */
3859 if (JUMP_TABLE_DATA_P (insn))
3860 continue;
3861
3862 if (num_delay_slots (insn) > 0)
3863 obstack_ptr_grow (&unfilled_slots_obstack, insn);
3864
3865 /* Ensure all jumps go to the last of a set of consecutive labels. */
3866 if (JUMP_P (insn)
3867 && (condjump_p (insn) || condjump_in_parallel_p (insn))
3868 && JUMP_LABEL (insn) != 0
3869 && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
3870 != JUMP_LABEL (insn)))
3871 redirect_jump (insn, target, 1);
3872 }
3873
3874 init_resource_info (epilogue_insn);
3875
3876 /* Show we haven't computed an end-of-function label yet. */
3877 end_of_function_label = 0;
3878
3879 /* Initialize the statistics for this function. */
3880 memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
3881 memset (num_filled_delays, 0, sizeof num_filled_delays);
3882
3883 /* Now do the delay slot filling. Try everything twice in case earlier
3884 changes make more slots fillable. */
3885
3886 for (reorg_pass_number = 0;
3887 reorg_pass_number < MAX_REORG_PASSES;
3888 reorg_pass_number++)
3889 {
3890 fill_simple_delay_slots (1);
3891 fill_simple_delay_slots (0);
3892 fill_eager_delay_slots ();
3893 relax_delay_slots (first);
3894 }
3895
3896 /* If we made an end of function label, indicate that it is now
3897 safe to delete it by undoing our prior adjustment to LABEL_NUSES.
3898 If it is now unused, delete it. */
3899 if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
3900 delete_related_insns (end_of_function_label);
3901
3902 #ifdef HAVE_return
3903 if (HAVE_return && end_of_function_label != 0)
3904 make_return_insns (first);
3905 #endif
3906
3907 /* Delete any USE insns made by update_block; subsequent passes don't need
3908 them or know how to deal with them. */
3909 for (insn = first; insn; insn = next)
3910 {
3911 next = NEXT_INSN (insn);
3912
3913 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
3914 && INSN_P (XEXP (PATTERN (insn), 0)))
3915 next = delete_related_insns (insn);
3916 }
3917
3918 obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3919
3920 /* It is not clear why the line below is needed, but it does seem to be. */
3921 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3922
3923 if (dump_file)
3924 {
3925 int i, j, need_comma;
3926 int total_delay_slots[MAX_DELAY_HISTOGRAM + 1];
3927 int total_annul_slots[MAX_DELAY_HISTOGRAM + 1];
3928
3929 for (reorg_pass_number = 0;
3930 reorg_pass_number < MAX_REORG_PASSES;
3931 reorg_pass_number++)
3932 {
3933 fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
3934 for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
3935 {
3936 need_comma = 0;
3937 fprintf (dump_file, ";; Reorg function #%d\n", i);
3938
3939 fprintf (dump_file, ";; %d insns needing delay slots\n;; ",
3940 num_insns_needing_delays[i][reorg_pass_number]);
3941
3942 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3943 if (num_filled_delays[i][j][reorg_pass_number])
3944 {
3945 if (need_comma)
3946 fprintf (dump_file, ", ");
3947 need_comma = 1;
3948 fprintf (dump_file, "%d got %d delays",
3949 num_filled_delays[i][j][reorg_pass_number], j);
3950 }
3951 fprintf (dump_file, "\n");
3952 }
3953 }
3954 memset (total_delay_slots, 0, sizeof total_delay_slots);
3955 memset (total_annul_slots, 0, sizeof total_annul_slots);
3956 for (insn = first; insn; insn = NEXT_INSN (insn))
3957 {
3958 if (! INSN_DELETED_P (insn)
3959 && NONJUMP_INSN_P (insn)
3960 && GET_CODE (PATTERN (insn)) != USE
3961 && GET_CODE (PATTERN (insn)) != CLOBBER)
3962 {
3963 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
3964 {
3965 j = XVECLEN (PATTERN (insn), 0) - 1;
3966 if (j > MAX_DELAY_HISTOGRAM)
3967 j = MAX_DELAY_HISTOGRAM;
3968 if (INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (insn), 0, 0)))
3969 total_annul_slots[j]++;
3970 else
3971 total_delay_slots[j]++;
3972 }
3973 else if (num_delay_slots (insn) > 0)
3974 total_delay_slots[0]++;
3975 }
3976 }
3977 fprintf (dump_file, ";; Reorg totals: ");
3978 need_comma = 0;
3979 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3980 {
3981 if (total_delay_slots[j])
3982 {
3983 if (need_comma)
3984 fprintf (dump_file, ", ");
3985 need_comma = 1;
3986 fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j);
3987 }
3988 }
3989 fprintf (dump_file, "\n");
3990 #if defined (ANNUL_IFTRUE_SLOTS) || defined (ANNUL_IFFALSE_SLOTS)
3991 fprintf (dump_file, ";; Reorg annuls: ");
3992 need_comma = 0;
3993 for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3994 {
3995 if (total_annul_slots[j])
3996 {
3997 if (need_comma)
3998 fprintf (dump_file, ", ");
3999 need_comma = 1;
4000 fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j);
4001 }
4002 }
4003 fprintf (dump_file, "\n");
4004 #endif
4005 fprintf (dump_file, "\n");
4006 }
4007
4008 /* For all JUMP insns, fill in branch prediction notes, so that during
4009 assembler output a target can set branch prediction bits in the code.
4010 We have to do this now, because up to this point the destinations of
4011 jumps can still be moved around and changed, but beyond this point
4012 they cannot. */
4013 for (insn = first; insn; insn = NEXT_INSN (insn))
4014 {
4015 int pred_flags;
4016
4017 if (NONJUMP_INSN_P (insn))
4018 {
4019 rtx pat = PATTERN (insn);
4020
4021 if (GET_CODE (pat) == SEQUENCE)
4022 insn = XVECEXP (pat, 0, 0);
4023 }
4024 if (!JUMP_P (insn))
4025 continue;
4026
4027 pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
4028 add_reg_note (insn, REG_BR_PRED, GEN_INT (pred_flags));
4029 }
4030 free_resource_info ();
4031 free (uid_to_ruid);
4032 #ifdef DELAY_SLOTS_FOR_EPILOGUE
4033 /* The SPARC assembler, for instance, emits a warning when debug info
4034 is output into a delay slot. */
4035 {
4036 rtx link;
4037
4038 for (link = crtl->epilogue_delay_list;
4039 link;
4040 link = XEXP (link, 1))
4041 INSN_LOCATOR (XEXP (link, 0)) = 0;
4042 }
4043
4044 #endif
4045 crtl->dbr_scheduled_p = true;
4046 }
4047 #endif /* DELAY_SLOTS */
4048 \f
4049 static bool
4050 gate_handle_delay_slots (void)
4051 {
4052 #ifdef DELAY_SLOTS
4053 /* At -O0 dataflow info isn't updated after RA. */
4054 return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
4055 #else
4056 return 0;
4057 #endif
4058 }
4059
4060 /* Run delay slot optimization. */
4061 static unsigned int
4062 rest_of_handle_delay_slots (void)
4063 {
4064 #ifdef DELAY_SLOTS
4065 dbr_schedule (get_insns ());
4066 #endif
4067 return 0;
4068 }
4069
4070 struct rtl_opt_pass pass_delay_slots =
4071 {
4072 {
4073 RTL_PASS,
4074 "dbr", /* name */
4075 gate_handle_delay_slots, /* gate */
4076 rest_of_handle_delay_slots, /* execute */
4077 NULL, /* sub */
4078 NULL, /* next */
4079 0, /* static_pass_number */
4080 TV_DBR_SCHED, /* tv_id */
4081 0, /* properties_required */
4082 0, /* properties_provided */
4083 0, /* properties_destroyed */
4084 0, /* todo_flags_start */
4085 TODO_dump_func |
4086 TODO_ggc_collect /* todo_flags_finish */
4087 }
4088 };
4089
4090 /* Machine dependent reorg pass. */
4091 static bool
4092 gate_handle_machine_reorg (void)
4093 {
4094 return targetm.machine_dependent_reorg != 0;
4095 }
4096
4097
4098 static unsigned int
4099 rest_of_handle_machine_reorg (void)
4100 {
4101 targetm.machine_dependent_reorg ();
4102 return 0;
4103 }
4104
4105 struct rtl_opt_pass pass_machine_reorg =
4106 {
4107 {
4108 RTL_PASS,
4109 "mach", /* name */
4110 gate_handle_machine_reorg, /* gate */
4111 rest_of_handle_machine_reorg, /* execute */
4112 NULL, /* sub */
4113 NULL, /* next */
4114 0, /* static_pass_number */
4115 TV_MACH_DEP, /* tv_id */
4116 0, /* properties_required */
4117 0, /* properties_provided */
4118 0, /* properties_destroyed */
4119 0, /* todo_flags_start */
4120 TODO_dump_func |
4121 TODO_ggc_collect /* todo_flags_finish */
4122 }
4123 };