1 /* Perform instruction reorganizations for delay slot filling.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
6 Hacked by Michael Tiemann (tiemann@cygnus.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction reorganization pass.
25
26 This pass runs after register allocation and final jump
27 optimization. It should be the last pass to run before peephole.
28 It serves primarily to fill delay slots of insns, typically branch
29 and call insns. Other insns typically involve more complicated
30 interactions of data dependencies and resource constraints, and
31 are better handled by scheduling before register allocation (by the
32 function `schedule_insns').
33
34 The Branch Penalty is the number of extra cycles that are needed to
35 execute a branch insn. On an ideal machine, branches take a single
36 cycle, and the Branch Penalty is 0. Several RISC machines approach
37 branch delays differently:
38
39 The MIPS has a single branch delay slot. Most insns
40 (except other branches) can be used to fill this slot. When the
41 slot is filled, two insns execute in two cycles, reducing the
42 branch penalty to zero.
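
   For example (schematic MIPS assembly; the register numbers are only
   illustrative), the filler can move an independent insn from before the
   branch into the slot:

	addu  $4,$4,1			beq   $2,$0,L2
	beq   $2,$0,L2	   becomes	addu  $4,$4,1	; delay slot
	nop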
43
44 The SPARC always has a branch delay slot, but its effects can be
45 annulled when the branch is not taken. This means that failing to
46 find other sources of insns, we can hoist an insn from the branch
47 target that would only be safe to execute knowing that the branch
48 is taken.
49
50 The HP-PA always has a branch delay slot. For unconditional branches
51 its effects can be annulled when the branch is taken. The effects
52 of the delay slot in a conditional branch can be nullified for forward
53 taken branches, or for untaken backward branches. This means
54 we can hoist insns from the fall-through path for forward branches or
55 steal insns from the target of backward branches.
56
57 The TMS320C3x and C4x have three branch delay slots. When the three
58 slots are filled, the branch penalty is zero. Most insns can fill the
59 delay slots except jump insns.
60
61 Three techniques for filling delay slots have been implemented so far:
62
63 (1) `fill_simple_delay_slots' is the simplest, most efficient way
64 to fill delay slots. This pass first looks for insns which come
65 from before the branch and which are safe to execute after the
66 branch. Then it searches after the insn requiring delay slots or,
67 in the case of a branch, for insns that are after the point at
68 which the branch merges into the fallthrough code, if such a point
69 exists. When such insns are found, the branch penalty decreases
70 and no code expansion takes place.
71
72 (2) `fill_eager_delay_slots' is more complicated: it is used for
73 scheduling conditional jumps, or for scheduling jumps which cannot
74 be filled using (1). A machine need not have annulled jumps to use
75 this strategy, but it helps (by keeping more options open).
76 `fill_eager_delay_slots' tries to guess the direction the branch
77 will go; if it guesses right 100% of the time, it can reduce the
78 branch penalty as much as `fill_simple_delay_slots' does. If it
79 guesses wrong 100% of the time, it might as well schedule nops. When
80 `fill_eager_delay_slots' takes insns from the fall-through path of
81 the jump, usually there is no code expansion; when it takes insns
82 from the branch target, there is code expansion if it is not the
83 only way to reach that target.
84
85 (3) `relax_delay_slots' uses a set of rules to simplify code that
86 has been reorganized by (1) and (2). It finds cases where
87 conditional test can be eliminated, jumps can be threaded, extra
88 insns can be eliminated, etc. It is the job of (1) and (2) to do a
89 good job of scheduling locally; `relax_delay_slots' takes care of
90 making the various individual schedules work well together. It is
91 especially tuned to handle the control flow interactions of branch
92 insns. It does nothing for insns with delay slots that do not
93 branch.
94
95 On machines that use CC0, we are very conservative. We will not make
96 a copy of an insn involving CC0 since we want to maintain a 1-1
97 correspondence between the insn that sets and uses CC0. The insns are
98 allowed to be separated by placing an insn that sets CC0 (but not an insn
99 that uses CC0; we could do this, but it doesn't seem worthwhile) in a
100 delay slot. In that case, we point each insn at the other with REG_CC_USER
101 and REG_CC_SETTER notes. Note that these restrictions affect very few
102 machines because most RISC machines with delay slots will not use CC0
103 (the RT is the only known exception at this point).
104
105 Not yet implemented:
106
107 The Acorn Risc Machine can conditionally execute most insns, so
108 it is profitable to move single insns into a position to execute
109 based on the condition code of the previous insn.
110
111 The HP-PA can conditionally nullify insns, providing a similar
112 effect to the ARM, differing mostly in which insn is "in charge". */
113
114 #include "config.h"
115 #include "system.h"
116 #include "coretypes.h"
117 #include "tm.h"
118 #include "diagnostic-core.h"
119 #include "rtl.h"
120 #include "tm_p.h"
121 #include "expr.h"
122 #include "function.h"
123 #include "insn-config.h"
124 #include "conditions.h"
125 #include "hard-reg-set.h"
126 #include "basic-block.h"
127 #include "regs.h"
128 #include "recog.h"
129 #include "flags.h"
130 #include "output.h"
131 #include "obstack.h"
132 #include "insn-attr.h"
133 #include "resource.h"
134 #include "except.h"
135 #include "params.h"
136 #include "timevar.h"
137 #include "target.h"
138 #include "tree-pass.h"
139
140 #ifdef DELAY_SLOTS
141
142 #ifndef ANNUL_IFTRUE_SLOTS
143 #define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
144 #endif
145 #ifndef ANNUL_IFFALSE_SLOTS
146 #define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
147 #endif
148
149 /* Insns which have delay slots that have not yet been filled. */
150
151 static struct obstack unfilled_slots_obstack;
152 static rtx *unfilled_firstobj;
153
154 /* Define macros to refer to the first and last slot containing unfilled
155 insns. These are used because the list may move and its address
156 should be recomputed at each use. */
157
158 #define unfilled_slots_base \
159 ((rtx *) obstack_base (&unfilled_slots_obstack))
160
161 #define unfilled_slots_next \
162 ((rtx *) obstack_next_free (&unfilled_slots_obstack))
163
164 /* Points to the label before the end of the function. */
165 static rtx end_of_function_label;
166
167 /* Mapping between INSN_UID's and position in the code since INSN_UID's do
168 not always monotonically increase. */
169 static int *uid_to_ruid;
170
171 /* Highest valid index in `uid_to_ruid'. */
172 static int max_uid;
173
174 static int stop_search_p (rtx, int);
175 static int resource_conflicts_p (struct resources *, struct resources *);
176 static int insn_references_resource_p (rtx, struct resources *, bool);
177 static int insn_sets_resource_p (rtx, struct resources *, bool);
178 static rtx find_end_label (void);
179 static rtx emit_delay_sequence (rtx, rtx, int);
180 static rtx add_to_delay_list (rtx, rtx);
181 static rtx delete_from_delay_slot (rtx);
182 static void delete_scheduled_jump (rtx);
183 static void note_delay_statistics (int, int);
184 #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
185 static rtx optimize_skip (rtx);
186 #endif
187 static int get_jump_flags (rtx, rtx);
188 static int rare_destination (rtx);
189 static int mostly_true_jump (rtx, rtx);
190 static rtx get_branch_condition (rtx, rtx);
191 static int condition_dominates_p (rtx, rtx);
192 static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
193 static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
194 static int check_annul_list_true_false (int, rtx);
195 static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx,
196 struct resources *,
197 struct resources *,
198 struct resources *,
199 int, int *, int *, rtx *);
200 static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx,
201 struct resources *,
202 struct resources *,
203 struct resources *,
204 int, int *, int *);
205 static void try_merge_delay_insns (rtx, rtx);
206 static rtx redundant_insn (rtx, rtx, rtx);
207 static int own_thread_p (rtx, rtx, int);
208 static void update_block (rtx, rtx);
209 static int reorg_redirect_jump (rtx, rtx);
210 static void update_reg_dead_notes (rtx, rtx);
211 static void fix_reg_dead_note (rtx, rtx);
212 static void update_reg_unused_notes (rtx, rtx);
213 static void fill_simple_delay_slots (int);
214 static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx,
215 int, int, int, int,
216 int *, rtx);
217 static void fill_eager_delay_slots (void);
218 static void relax_delay_slots (rtx);
219 #ifdef HAVE_return
220 static void make_return_insns (rtx);
221 #endif
222 \f
223 /* Return TRUE if this insn should stop the search for insns to fill delay
224 slots. LABELS_P indicates that labels should terminate the search.
225 In all cases, jumps terminate the search. */
226
227 static int
228 stop_search_p (rtx insn, int labels_p)
229 {
230 if (insn == 0)
231 return 1;
232
233 /* If the insn can throw an exception that is caught within the function,
234 it may effectively perform a jump from the viewpoint of the function.
235 Therefore act like for a jump. */
236 if (can_throw_internal (insn))
237 return 1;
238
239 switch (GET_CODE (insn))
240 {
241 case NOTE:
242 case CALL_INSN:
243 return 0;
244
245 case CODE_LABEL:
246 return labels_p;
247
248 case JUMP_INSN:
249 case BARRIER:
250 return 1;
251
252 case INSN:
253 /* OK unless it contains a delay slot or is an `asm' insn of some type.
254 We don't know anything about these. */
255 return (GET_CODE (PATTERN (insn)) == SEQUENCE
256 || GET_CODE (PATTERN (insn)) == ASM_INPUT
257 || asm_noperands (PATTERN (insn)) >= 0);
258
259 default:
260 gcc_unreachable ();
261 }
262 }
263 \f
264 /* Return TRUE if any resources are marked in both RES1 and RES2 or if either
265 resource set contains a volatile memory reference. Otherwise, return FALSE. */
266
267 static int
268 resource_conflicts_p (struct resources *res1, struct resources *res2)
269 {
270 if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
271 || (res1->unch_memory && res2->unch_memory)
272 || res1->volatil || res2->volatil)
273 return 1;
274
275 #ifdef HARD_REG_SET
276 return (res1->regs & res2->regs) != HARD_CONST (0);
277 #else
278 {
279 int i;
280
281 for (i = 0; i < HARD_REG_SET_LONGS; i++)
282 if ((res1->regs[i] & res2->regs[i]) != 0)
283 return 1;
284 return 0;
285 }
286 #endif
287 }
288
289 /* Return TRUE if any resource marked in RES, a `struct resources', is
290 referenced by INSN. If INCLUDE_DELAYED_EFFECTS is set, resources used
291 by a called routine are included.
292
293 We compute this by computing all the resources referenced by INSN and
294 seeing if this conflicts with RES. It might be faster to directly check
295 ourselves, and this is the way it used to work, but it means duplicating
296 a large block of complex code. */
297
298 static int
299 insn_references_resource_p (rtx insn, struct resources *res,
300 bool include_delayed_effects)
301 {
302 struct resources insn_res;
303
304 CLEAR_RESOURCE (&insn_res);
305 mark_referenced_resources (insn, &insn_res, include_delayed_effects);
306 return resource_conflicts_p (&insn_res, res);
307 }
308
309 /* Return TRUE if INSN modifies resources that are marked in RES.
310 INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
311 included. CC0 is only modified if it is explicitly set; see comments
312 in front of mark_set_resources for details. */
313
314 static int
315 insn_sets_resource_p (rtx insn, struct resources *res,
316 bool include_delayed_effects)
317 {
318 struct resources insn_sets;
319
320 CLEAR_RESOURCE (&insn_sets);
321 mark_set_resources (insn, &insn_sets, 0,
322 (include_delayed_effects
323 ? MARK_SRC_DEST_CALL
324 : MARK_SRC_DEST));
325 return resource_conflicts_p (&insn_sets, res);
326 }
327 \f
328 /* Find a label at the end of the function or before a RETURN. If there
329 is none, try to make one. If that fails, returns 0.
330
331 The property of such a label is that it is placed just before the
332 epilogue or a bare RETURN insn, so that another bare RETURN can be
333 turned into a jump to the label unconditionally. In particular, the
334 label cannot be placed before a RETURN insn with a filled delay slot.
335
336 ??? There may be a problem with the current implementation. Suppose
337 we start with a bare RETURN insn and call find_end_label. It may set
338 end_of_function_label just before the RETURN. Suppose the machinery
339 is able to fill the delay slot of the RETURN insn afterwards. Then
340 end_of_function_label is no longer valid according to the property
341 described above and find_end_label will still return it unmodified.
342 Note that this is probably mitigated by the following observation:
343 once end_of_function_label is made, it is very likely the target of
344 a jump, so filling the delay slot of the RETURN will be much more
345 difficult. */
346
347 static rtx
348 find_end_label (void)
349 {
350 rtx insn;
351
352 /* If we found one previously, return it. */
353 if (end_of_function_label)
354 return end_of_function_label;
355
356 /* Otherwise, see if there is a label at the end of the function. If there
357 is, it must be that RETURN insns aren't needed, so that is our return
358 label and we don't have to do anything else. */
359
360 insn = get_last_insn ();
361 while (NOTE_P (insn)
362 || (NONJUMP_INSN_P (insn)
363 && (GET_CODE (PATTERN (insn)) == USE
364 || GET_CODE (PATTERN (insn)) == CLOBBER)))
365 insn = PREV_INSN (insn);
366
367 /* When a target threads its epilogue we might already have a
368 suitable return insn. If so put a label before it for the
369 end_of_function_label. */
370 if (BARRIER_P (insn)
371 && JUMP_P (PREV_INSN (insn))
372 && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
373 {
374 rtx temp = PREV_INSN (PREV_INSN (insn));
375 end_of_function_label = gen_label_rtx ();
376 LABEL_NUSES (end_of_function_label) = 0;
377
378 /* Put the label before any USE insns that may precede the RETURN insn. */
379 while (GET_CODE (temp) == USE)
380 temp = PREV_INSN (temp);
381
382 emit_label_after (end_of_function_label, temp);
383 }
384
385 else if (LABEL_P (insn))
386 end_of_function_label = insn;
387 else
388 {
389 end_of_function_label = gen_label_rtx ();
390 LABEL_NUSES (end_of_function_label) = 0;
391 /* If the basic block reorder pass moves the return insn to
392 some other place try to locate it again and put our
393 end_of_function_label there. */
394 while (insn && ! (JUMP_P (insn)
395 && (GET_CODE (PATTERN (insn)) == RETURN)))
396 insn = PREV_INSN (insn);
397 if (insn)
398 {
399 insn = PREV_INSN (insn);
400
401 /* Put the label before any USE insns that may precede the
402 RETURN insn. */
403 while (GET_CODE (insn) == USE)
404 insn = PREV_INSN (insn);
405
406 emit_label_after (end_of_function_label, insn);
407 }
408 else
409 {
410 #ifdef HAVE_epilogue
411 if (HAVE_epilogue
412 #ifdef HAVE_return
413 && ! HAVE_return
414 #endif
415 )
416 {
417 /* The RETURN insn has its delay slot filled so we cannot
418 emit the label just before it. Since we already have
419 an epilogue and cannot emit a new RETURN, we cannot
420 emit the label at all. */
421 end_of_function_label = NULL_RTX;
422 return end_of_function_label;
423 }
424 #endif /* HAVE_epilogue */
425
426 /* Otherwise, make a new label and emit a RETURN and BARRIER,
427 if needed. */
428 emit_label (end_of_function_label);
429 #ifdef HAVE_return
430 /* We don't bother trying to create a return insn if the
431 epilogue has filled delay-slots; we would have to try and
432 move the delay-slot fillers to the delay-slots for the new
433 return insn or in front of the new return insn. */
434 if (crtl->epilogue_delay_list == NULL
435 && HAVE_return)
436 {
437 /* The return we make may have delay slots too. */
438 rtx insn = gen_return ();
439 insn = emit_jump_insn (insn);
440 emit_barrier ();
441 if (num_delay_slots (insn) > 0)
442 obstack_ptr_grow (&unfilled_slots_obstack, insn);
443 }
444 #endif
445 }
446 }
447
448 /* Show one additional use for this label so it won't go away until
449 we are done. */
450 ++LABEL_NUSES (end_of_function_label);
451
452 return end_of_function_label;
453 }
454 \f
455 /* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
456 the pattern of INSN with the SEQUENCE.
457
458 Chain the insns so that NEXT_INSN of each insn in the sequence points to
459 the next and NEXT_INSN of the last insn in the sequence points to
460 the first insn after the sequence. Similarly for PREV_INSN. This makes
461 it easier to scan all insns.
462
463 Returns the SEQUENCE that replaces INSN. */
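
/* As a schematic illustration (the insn UIDs are invented), filling one
   delay slot of a branch replaces the two chained insns

	(jump_insn 10 ... (set (pc) (label_ref 20)))
	(insn 11 ... (set (reg 3) (plus (reg 3) (const_int 4))))

   with a single insn whose pattern is

	(sequence [(jump_insn 10 ...) (insn 11 ...)])

   after which insn 11 no longer appears on the main chain by itself.  */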
464
465 static rtx
466 emit_delay_sequence (rtx insn, rtx list, int length)
467 {
468 int i = 1;
469 rtx li;
470 int had_barrier = 0;
471
472 /* Allocate the rtvec to hold the insns and the SEQUENCE. */
473 rtvec seqv = rtvec_alloc (length + 1);
474 rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
475 rtx seq_insn = make_insn_raw (seq);
476 rtx first = get_insns ();
477 rtx last = get_last_insn ();
478
479 /* Make a copy of the insn having delay slots. */
480 rtx delay_insn = copy_rtx (insn);
481
482 /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
483 confuse further processing. Update LAST in case it was the last insn.
484 We will put the BARRIER back in later. */
485 if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn)))
486 {
487 delete_related_insns (NEXT_INSN (insn));
488 last = get_last_insn ();
489 had_barrier = 1;
490 }
491
492 /* Splice our SEQUENCE into the insn stream where INSN used to be. */
493 NEXT_INSN (seq_insn) = NEXT_INSN (insn);
494 PREV_INSN (seq_insn) = PREV_INSN (insn);
495
496 if (insn != last)
497 PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
498
499 if (insn != first)
500 NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
501
502 /* Note the calls to set_new_first_and_last_insn must occur after
503 SEQ_INSN has been completely spliced into the insn stream.
504
505 Otherwise CUR_INSN_UID will get set to an incorrect value because
506 set_new_first_and_last_insn will not find SEQ_INSN in the chain. */
507 if (insn == last)
508 set_new_first_and_last_insn (first, seq_insn);
509
510 if (insn == first)
511 set_new_first_and_last_insn (seq_insn, last);
512
513 /* Build our SEQUENCE and rebuild the insn chain. */
514 XVECEXP (seq, 0, 0) = delay_insn;
515 INSN_DELETED_P (delay_insn) = 0;
516 PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
517
518 INSN_LOCATOR (seq_insn) = INSN_LOCATOR (delay_insn);
519
520 for (li = list; li; li = XEXP (li, 1), i++)
521 {
522 rtx tem = XEXP (li, 0);
523 rtx note, next;
524
525 /* Show that this copy of the insn isn't deleted. */
526 INSN_DELETED_P (tem) = 0;
527
528 XVECEXP (seq, 0, i) = tem;
529 PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
530 NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
531
532 /* The SPARC assembler, for instance, emits a warning when debug info
533 is output into the delay slot. */
534 if (INSN_LOCATOR (tem) && !INSN_LOCATOR (seq_insn))
535 INSN_LOCATOR (seq_insn) = INSN_LOCATOR (tem);
536 INSN_LOCATOR (tem) = 0;
537
538 for (note = REG_NOTES (tem); note; note = next)
539 {
540 next = XEXP (note, 1);
541 switch (REG_NOTE_KIND (note))
542 {
543 case REG_DEAD:
544 /* Remove any REG_DEAD notes because we can't rely on them now
545 that the insn has been moved. */
546 remove_note (tem, note);
547 break;
548
549 case REG_LABEL_OPERAND:
550 case REG_LABEL_TARGET:
551 /* Keep the label reference count up to date. */
552 if (LABEL_P (XEXP (note, 0)))
553 LABEL_NUSES (XEXP (note, 0)) ++;
554 break;
555
556 default:
557 break;
558 }
559 }
560 }
561
562 NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
563
564 /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
565 last insn in that SEQUENCE to point to us. Similarly for the first
566 insn in the following insn if it is a SEQUENCE. */
567
568 if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn))
569 && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
570 NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
571 XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
572 = seq_insn;
573
574 if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn))
575 && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
576 PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
577
578 /* If there used to be a BARRIER, put it back. */
579 if (had_barrier)
580 emit_barrier_after (seq_insn);
581
582 gcc_assert (i == length + 1);
583
584 return seq_insn;
585 }
586
587 /* Add INSN to DELAY_LIST and return the head of the new list. The list must
588 be in the order in which the insns are to be executed. */
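
/* For instance, adding I3 to the two-element list (I1 I2) yields
   (I1 I2 I3); each node is an INSN_LIST whose XEXP (node, 0) is an insn
   and whose XEXP (node, 1) is the rest of the list.  */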
589
590 static rtx
591 add_to_delay_list (rtx insn, rtx delay_list)
592 {
593 /* If we have an empty list, just make a new list element. If
594 INSN has its block number recorded, clear it since we may
595 be moving the insn to a new block. */
596
597 if (delay_list == 0)
598 {
599 clear_hashed_info_for_insn (insn);
600 return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
601 }
602
603 /* Otherwise this must be an INSN_LIST. Add INSN to the end of the
604 list. */
605 XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
606
607 return delay_list;
608 }
609 \f
610 /* Delete INSN from the delay slot of the insn that it is in, which may
611 produce an insn with no delay slots. Return the new insn. */
612
613 static rtx
614 delete_from_delay_slot (rtx insn)
615 {
616 rtx trial, seq_insn, seq, prev;
617 rtx delay_list = 0;
618 int i;
619 int had_barrier = 0;
620
621 /* We first must find the insn containing the SEQUENCE with INSN in its
622 delay slot. Do this by finding an insn, TRIAL, where
623 PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */
624
625 for (trial = insn;
626 PREV_INSN (NEXT_INSN (trial)) == trial;
627 trial = NEXT_INSN (trial))
628 ;
629
630 seq_insn = PREV_INSN (NEXT_INSN (trial));
631 seq = PATTERN (seq_insn);
632
633 if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
634 had_barrier = 1;
635
636 /* Create a delay list consisting of all the insns other than the one
637 we are deleting (unless we were the only one). */
638 if (XVECLEN (seq, 0) > 2)
639 for (i = 1; i < XVECLEN (seq, 0); i++)
640 if (XVECEXP (seq, 0, i) != insn)
641 delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
642
643 /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
644 list, and rebuild the delay list if non-empty. */
645 prev = PREV_INSN (seq_insn);
646 trial = XVECEXP (seq, 0, 0);
647 delete_related_insns (seq_insn);
648 add_insn_after (trial, prev, NULL);
649
650 /* If there was a barrier after the old SEQUENCE, re-emit it. */
651 if (had_barrier)
652 emit_barrier_after (trial);
653
654 /* If there are any delay insns, re-emit them. Otherwise clear the
655 annul flag. */
656 if (delay_list)
657 trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
658 else if (INSN_P (trial))
659 INSN_ANNULLED_BRANCH_P (trial) = 0;
660
661 INSN_FROM_TARGET_P (insn) = 0;
662
663 /* Show we need to fill this insn again. */
664 obstack_ptr_grow (&unfilled_slots_obstack, trial);
665
666 return trial;
667 }
668 \f
669 /* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down
670 the insn that sets CC0 for it and delete it too. */
671
672 static void
673 delete_scheduled_jump (rtx insn)
674 {
675 /* Delete the insn that sets cc0 for us. On machines without cc0, we could
676 delete the insn that sets the condition code, but it is hard to find it.
677 Since this case is rare anyway, don't bother trying; there would likely
678 be other insns that became dead anyway, which we wouldn't know to
679 delete. */
680
681 #ifdef HAVE_cc0
682 if (reg_mentioned_p (cc0_rtx, insn))
683 {
684 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
685
686 /* If a reg-note was found, it points to an insn to set CC0. This
687 insn is in the delay list of some other insn. So delete it from
688 the delay list it was in. */
689 if (note)
690 {
691 if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
692 && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
693 delete_from_delay_slot (XEXP (note, 0));
694 }
695 else
696 {
697 /* The insn setting CC0 is our previous insn, but it may be in
698 a delay slot. It will be the last insn in the delay slot, if
699 it is. */
700 rtx trial = previous_insn (insn);
701 if (NOTE_P (trial))
702 trial = prev_nonnote_insn (trial);
703 if (sets_cc0_p (PATTERN (trial)) != 1
704 || FIND_REG_INC_NOTE (trial, NULL_RTX))
705 return;
706 if (PREV_INSN (NEXT_INSN (trial)) == trial)
707 delete_related_insns (trial);
708 else
709 delete_from_delay_slot (trial);
710 }
711 }
712 #endif
713
714 delete_related_insns (insn);
715 }
716 \f
717 /* Counters for delay-slot filling. */
718
719 #define NUM_REORG_FUNCTIONS 2
720 #define MAX_DELAY_HISTOGRAM 3
721 #define MAX_REORG_PASSES 2
722
723 static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];
724
725 static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];
726
727 static int reorg_pass_number;
728
729 static void
730 note_delay_statistics (int slots_filled, int index)
731 {
732 num_insns_needing_delays[index][reorg_pass_number]++;
733 if (slots_filled > MAX_DELAY_HISTOGRAM)
734 slots_filled = MAX_DELAY_HISTOGRAM;
735 num_filled_delays[index][slots_filled][reorg_pass_number]++;
736 }
737 \f
738 #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
739
740 /* Optimize the following cases:
741
742 1. When a conditional branch skips over only one instruction,
743 use an annulling branch and put that insn in the delay slot.
744 Use either a branch that annuls when the condition is true or
745 invert the test with a branch that annuls when the condition is
746 false. This saves insns, since otherwise we must copy an insn
747 from the L1 target.
748
749 (orig) (skip) (otherwise)
750 Bcc.n L1 Bcc',a L1 Bcc,a L1'
751 insn insn insn2
752 L1: L1: L1:
753 insn2 insn2 insn2
754 insn3 insn3 L1':
755 insn3
756
757 2. When a conditional branch skips over only one instruction,
758 and after that, it unconditionally branches somewhere else,
759 perform a similar optimization. This saves executing the
760 second branch in the case where the inverted condition is true.
761
762 Bcc.n L1 Bcc',a L2
763 insn insn
764 L1: L1:
765 Bra L2 Bra L2
766
767 INSN is a JUMP_INSN.
768
769 This should be expanded to skip over N insns, where N is the number
770 of delay slots required. */
771
772 static rtx
773 optimize_skip (rtx insn)
774 {
775 rtx trial = next_nonnote_insn (insn);
776 rtx next_trial = next_active_insn (trial);
777 rtx delay_list = 0;
778 int flags;
779
780 flags = get_jump_flags (insn, JUMP_LABEL (insn));
781
782 if (trial == 0
783 || !NONJUMP_INSN_P (trial)
784 || GET_CODE (PATTERN (trial)) == SEQUENCE
785 || recog_memoized (trial) < 0
786 || (! eligible_for_annul_false (insn, 0, trial, flags)
787 && ! eligible_for_annul_true (insn, 0, trial, flags))
788 || can_throw_internal (trial))
789 return 0;
790
791 /* There are two cases where we are just executing one insn (we assume
792 here that a branch requires only one insn; this should be generalized
793 at some point): Where the branch goes around a single insn or where
794 we have one insn followed by a branch to the same label we branch to.
795 In both of these cases, inverting the jump and annulling the delay
796 slot give the same effect in fewer insns. */
797 if ((next_trial == next_active_insn (JUMP_LABEL (insn))
798 && ! (next_trial == 0 && crtl->epilogue_delay_list != 0))
799 || (next_trial != 0
800 && JUMP_P (next_trial)
801 && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
802 && (simplejump_p (next_trial)
803 || GET_CODE (PATTERN (next_trial)) == RETURN)))
804 {
805 if (eligible_for_annul_false (insn, 0, trial, flags))
806 {
807 if (invert_jump (insn, JUMP_LABEL (insn), 1))
808 INSN_FROM_TARGET_P (trial) = 1;
809 else if (! eligible_for_annul_true (insn, 0, trial, flags))
810 return 0;
811 }
812
813 delay_list = add_to_delay_list (trial, NULL_RTX);
814 next_trial = next_active_insn (trial);
815 update_block (trial, trial);
816 delete_related_insns (trial);
817
818 /* Also, if we are targeting an unconditional
819 branch, thread our jump to the target of that branch. Don't
820 change this into a RETURN here, because it may not accept what
821 we have in the delay slot. We'll fix this up later. */
822 if (next_trial && JUMP_P (next_trial)
823 && (simplejump_p (next_trial)
824 || GET_CODE (PATTERN (next_trial)) == RETURN))
825 {
826 rtx target_label = JUMP_LABEL (next_trial);
827 if (target_label == 0)
828 target_label = find_end_label ();
829
830 if (target_label)
831 {
832 /* Recompute the flags based on TARGET_LABEL since threading
833 the jump to TARGET_LABEL may change the direction of the
834 jump (which may change the circumstances in which the
835 delay slot is nullified). */
836 flags = get_jump_flags (insn, target_label);
837 if (eligible_for_annul_true (insn, 0, trial, flags))
838 reorg_redirect_jump (insn, target_label);
839 }
840 }
841
842 INSN_ANNULLED_BRANCH_P (insn) = 1;
843 }
844
845 return delay_list;
846 }
847 #endif
848 \f
849 /* Encode and return branch direction and prediction information for
850 INSN assuming it will jump to LABEL.
851
852 Unconditional branches return no direction information and
853 are predicted as very likely taken. */
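
/* For example, a conditional branch to a label later in the insn stream
   that is predicted taken would get ATTR_FLAG_forward | ATTR_FLAG_likely.  */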
854
855 static int
856 get_jump_flags (rtx insn, rtx label)
857 {
858 int flags;
859
860 /* get_jump_flags can be passed any insn with delay slots, these may
861 be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch
862 direction information, and only if they are conditional jumps.
863
864 If LABEL is zero, then there is no way to determine the branch
865 direction. */
866 if (JUMP_P (insn)
867 && (condjump_p (insn) || condjump_in_parallel_p (insn))
868 && INSN_UID (insn) <= max_uid
869 && label != 0
870 && INSN_UID (label) <= max_uid)
871 flags
872 = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
873 ? ATTR_FLAG_forward : ATTR_FLAG_backward;
874 /* No valid direction information. */
875 else
876 flags = 0;
877
878 /* If INSN is a conditional branch, call mostly_true_jump to
879 determine the branch prediction.
880
881 Unconditional branches are predicted as very likely taken. */
882 if (JUMP_P (insn)
883 && (condjump_p (insn) || condjump_in_parallel_p (insn)))
884 {
885 int prediction;
886
887 prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
888 switch (prediction)
889 {
890 case 2:
891 flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
892 break;
893 case 1:
894 flags |= ATTR_FLAG_likely;
895 break;
896 case 0:
897 flags |= ATTR_FLAG_unlikely;
898 break;
899 case -1:
900 flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
901 break;
902
903 default:
904 gcc_unreachable ();
905 }
906 }
907 else
908 flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
909
910 return flags;
911 }
912
913 /* Return 1 if INSN is a destination that will be branched to rarely (the
914 return point of a function); return 2 if INSN will be branched to very
915 rarely (a call to a function that doesn't return). Otherwise,
916 return 0. */
917
918 static int
919 rare_destination (rtx insn)
920 {
921 int jump_count = 0;
922 rtx next;
923
924 for (; insn; insn = next)
925 {
926 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
927 insn = XVECEXP (PATTERN (insn), 0, 0);
928
929 next = NEXT_INSN (insn);
930
931 switch (GET_CODE (insn))
932 {
933 case CODE_LABEL:
934 return 0;
935 case BARRIER:
936 /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We
937 don't scan past JUMP_INSNs, so any barrier we find here must
938 have been after a CALL_INSN and hence mean the call doesn't
939 return. */
940 return 2;
941 case JUMP_INSN:
942 if (GET_CODE (PATTERN (insn)) == RETURN)
943 return 1;
944 else if (simplejump_p (insn)
945 && jump_count++ < 10)
946 next = JUMP_LABEL (insn);
947 else
948 return 0;
949
950 default:
951 break;
952 }
953 }
954
955 /* If we got here it means we hit the end of the function. So this
956 is an unlikely destination. */
957
958 return 1;
959 }
960
961 /* Return truth value of the statement that this branch
962 is mostly taken. If we think that the branch is extremely likely
963 to be taken, we return 2. If the branch is slightly more likely to be
964 taken, return 1. If the branch is slightly less likely to be taken,
965 return 0 and if the branch is highly unlikely to be taken, return -1.
966
967 CONDITION, if nonzero, is the condition that JUMP_INSN is testing. */
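
/* As a worked example, with REG_BR_PROB_BASE at its usual value of 10000,
   a REG_BR_PROB note of 9000 or more yields 2, 5000-8999 yields 1,
   1000-4999 yields 0, and anything below 1000 yields -1.  */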
968
969 static int
970 mostly_true_jump (rtx jump_insn, rtx condition)
971 {
972 rtx target_label = JUMP_LABEL (jump_insn);
973 rtx note;
974 int rare_dest, rare_fallthrough;
975
976 /* If branch probabilities are available, then use that number since it
977 always gives a correct answer. */
978 note = find_reg_note (jump_insn, REG_BR_PROB, 0);
979 if (note)
980 {
981 int prob = INTVAL (XEXP (note, 0));
982
983 if (prob >= REG_BR_PROB_BASE * 9 / 10)
984 return 2;
985 else if (prob >= REG_BR_PROB_BASE / 2)
986 return 1;
987 else if (prob >= REG_BR_PROB_BASE / 10)
988 return 0;
989 else
990 return -1;
991 }
992
993 /* Look at the relative rarities of the fallthrough and destination. If
994 they differ, we can predict the branch that way. */
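  /* For instance, a fallthrough into a noreturn call (rarity 2) together
     with an ordinary destination (rarity 0) gives a difference of 2, so
     the branch is predicted very likely taken.  */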
995 rare_dest = rare_destination (target_label);
996 rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));
997
998 switch (rare_fallthrough - rare_dest)
999 {
1000 case -2:
1001 return -1;
1002 case -1:
1003 return 0;
1004 case 0:
1005 break;
1006 case 1:
1007 return 1;
1008 case 2:
1009 return 2;
1010 }
1011
1012 /* If we couldn't figure out what this jump was, assume it won't be
1013 taken. This should be rare. */
1014 if (condition == 0)
1015 return 0;
1016
1017 /* Predict backward branches usually take, forward branches usually not. If
1018 we don't know whether this is forward or backward, assume the branch
1019 will be taken, since most are. */
1020 return (target_label == 0 || INSN_UID (jump_insn) > max_uid
1021 || INSN_UID (target_label) > max_uid
1022 || (uid_to_ruid[INSN_UID (jump_insn)]
1023 > uid_to_ruid[INSN_UID (target_label)]));
1024 }
1025
1026 /* Return the condition under which INSN will branch to TARGET. If TARGET
1027 is zero, return the condition under which INSN will return. If INSN is
1028 an unconditional branch, return const_true_rtx. If INSN isn't a simple
1029 type of jump, or it doesn't go to TARGET, return 0. */
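
/* For example, given the conditional jump pattern

	(set (pc) (if_then_else (eq (reg 1) (const_int 0))
				(label_ref 20)
				(pc)))

   with TARGET being that label, we return the (eq ...) expression; if the
   two arms were swapped, we would return the reversed comparison.  */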
1030
1031 static rtx
1032 get_branch_condition (rtx insn, rtx target)
1033 {
1034 rtx pat = PATTERN (insn);
1035 rtx src;
1036
1037 if (condjump_in_parallel_p (insn))
1038 pat = XVECEXP (pat, 0, 0);
1039
1040 if (GET_CODE (pat) == RETURN)
1041 return target == 0 ? const_true_rtx : 0;
1042
1043 else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
1044 return 0;
1045
1046 src = SET_SRC (pat);
1047 if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
1048 return const_true_rtx;
1049
1050 else if (GET_CODE (src) == IF_THEN_ELSE
1051 && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
1052 || (GET_CODE (XEXP (src, 1)) == LABEL_REF
1053 && XEXP (XEXP (src, 1), 0) == target))
1054 && XEXP (src, 2) == pc_rtx)
1055 return XEXP (src, 0);
1056
1057 else if (GET_CODE (src) == IF_THEN_ELSE
1058 && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
1059 || (GET_CODE (XEXP (src, 2)) == LABEL_REF
1060 && XEXP (XEXP (src, 2), 0) == target))
1061 && XEXP (src, 1) == pc_rtx)
1062 {
1063 enum rtx_code rev;
1064 rev = reversed_comparison_code (XEXP (src, 0), insn);
1065 if (rev != UNKNOWN)
1066 return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)),
1067 XEXP (XEXP (src, 0), 0),
1068 XEXP (XEXP (src, 0), 1));
1069 }
1070
1071 return 0;
1072 }
1073
1074 /* Return nonzero if CONDITION is more strict than the condition of
1075 INSN, i.e., if INSN will always branch if CONDITION is true. */
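
/* E.g. (eq (reg 1) (const_int 0)) dominates (le (reg 1) (const_int 0)):
   whenever the first condition holds, so does the second, so INSN is
   certain to branch whenever CONDITION is true.  */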
1076
1077 static int
1078 condition_dominates_p (rtx condition, rtx insn)
1079 {
1080 rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
1081 enum rtx_code code = GET_CODE (condition);
1082 enum rtx_code other_code;
1083
1084 if (rtx_equal_p (condition, other_condition)
1085 || other_condition == const_true_rtx)
1086 return 1;
1087
1088 else if (condition == const_true_rtx || other_condition == 0)
1089 return 0;
1090
1091 other_code = GET_CODE (other_condition);
1092 if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
1093 || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
1094 || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
1095 return 0;
1096
1097 return comparison_dominates_p (code, other_code);
1098 }
1099
1100 /* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
1101 any insns already in the delay slot of JUMP. */
1102
1103 static int
1104 redirect_with_delay_slots_safe_p (rtx jump, rtx newlabel, rtx seq)
1105 {
1106 int flags, i;
1107 rtx pat = PATTERN (seq);
1108
1109 /* Make sure all the delay slots of this jump would still
1110 be valid after threading the jump. If they are still
1111 valid, then return nonzero. */
1112
1113 flags = get_jump_flags (jump, newlabel);
1114 for (i = 1; i < XVECLEN (pat, 0); i++)
1115 if (! (
1116 #ifdef ANNUL_IFFALSE_SLOTS
1117 (INSN_ANNULLED_BRANCH_P (jump)
1118 && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
1119 ? eligible_for_annul_false (jump, i - 1,
1120 XVECEXP (pat, 0, i), flags) :
1121 #endif
1122 #ifdef ANNUL_IFTRUE_SLOTS
1123 (INSN_ANNULLED_BRANCH_P (jump)
1124 && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
1125 ? eligible_for_annul_true (jump, i - 1,
1126 XVECEXP (pat, 0, i), flags) :
1127 #endif
1128 eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
1129 break;
1130
1131 return (i == XVECLEN (pat, 0));
1132 }
1133
1134 /* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
1135 any insns we wish to place in the delay slot of JUMP. */
1136
1137 static int
1138 redirect_with_delay_list_safe_p (rtx jump, rtx newlabel, rtx delay_list)
1139 {
1140 int flags, i;
1141 rtx li;
1142
1143 /* Make sure all the insns in DELAY_LIST would still be
1144 valid after threading the jump. If they are still
1145 valid, then return nonzero. */
1146
1147 flags = get_jump_flags (jump, newlabel);
1148 for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
1149 if (! (
1150 #ifdef ANNUL_IFFALSE_SLOTS
1151 (INSN_ANNULLED_BRANCH_P (jump)
1152 && INSN_FROM_TARGET_P (XEXP (li, 0)))
1153 ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
1154 #endif
1155 #ifdef ANNUL_IFTRUE_SLOTS
1156 (INSN_ANNULLED_BRANCH_P (jump)
1157 && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
1158 ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
1159 #endif
1160 eligible_for_delay (jump, i, XEXP (li, 0), flags)))
1161 break;
1162
1163 return (li == NULL);
1164 }
1165
1166 /* DELAY_LIST is a list of insns that have already been placed into delay
1167 slots. See if all of them have the same annulling status as ANNUL_TRUE_P.
1168 If not, return 0; otherwise return 1. */
1169
1170 static int
1171 check_annul_list_true_false (int annul_true_p, rtx delay_list)
1172 {
1173 rtx temp;
1174
1175 if (delay_list)
1176 {
1177 for (temp = delay_list; temp; temp = XEXP (temp, 1))
1178 {
1179 rtx trial = XEXP (temp, 0);
1180
1181 if ((annul_true_p && INSN_FROM_TARGET_P (trial))
1182 || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
1183 return 0;
1184 }
1185 }
1186
1187 return 1;
1188 }
1189 \f
1190 /* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
1191 the condition tested by INSN is CONDITION and the resources shown in
1192 OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
1193 from SEQ's delay list, in addition to whatever insns it may execute
1194 (in DELAY_LIST). SETS and NEEDED denote resources already set and
1195 needed while searching for delay slot insns. Return the concatenated
1196 delay list if possible, otherwise, return 0.
1197
1198 SLOTS_TO_FILL is the total number of slots required by INSN, and
1199 PSLOTS_FILLED points to the number filled so far (also the number of
1200 insns in DELAY_LIST). It is updated with the number that have been
1201 filled from the SEQUENCE, if any.
1202
1203 PANNUL_P points to a nonzero value if we already know that we need
1204 to annul INSN. If this routine determines that annulling is needed,
1205 it may set that value nonzero.
1206
1207 PNEW_THREAD points to a location that is to receive the place at which
1208 execution should continue. */
1209
1210 static rtx
1211 steal_delay_list_from_target (rtx insn, rtx condition, rtx seq,
1212 rtx delay_list, struct resources *sets,
1213 struct resources *needed,
1214 struct resources *other_needed,
1215 int slots_to_fill, int *pslots_filled,
1216 int *pannul_p, rtx *pnew_thread)
1217 {
1218 rtx temp;
1219 int slots_remaining = slots_to_fill - *pslots_filled;
1220 int total_slots_filled = *pslots_filled;
1221 rtx new_delay_list = 0;
1222 int must_annul = *pannul_p;
1223 int used_annul = 0;
1224 int i;
1225 struct resources cc_set;
1226
1227 /* We can't do anything if there are more delay slots in SEQ than we
1228 can handle, or if we don't know that it will be a taken branch.
1229 We know that it will be a taken branch if it is either an unconditional
1230 branch or a conditional branch with a stricter branch condition.
1231
1232 Also, exit if the branch has more than one set, since then it is computing
1233 other results that can't be ignored, e.g. the HPPA mov&branch instruction.
1234 ??? It may be possible to move other sets into INSN in addition to
1235 moving the instructions in the delay slots.
1236
1237 We cannot steal the delay list if one of the instructions in the
1238 current delay_list modifies the condition codes and the jump in the
1239 sequence is a conditional jump. We cannot change the direction of
1240 that jump because the condition codes would affect the direction of
1241 the jump in the sequence. */
1242
1243 CLEAR_RESOURCE (&cc_set);
1244 for (temp = delay_list; temp; temp = XEXP (temp, 1))
1245 {
1246 rtx trial = XEXP (temp, 0);
1247
1248 mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
1249 if (insn_references_resource_p (XVECEXP (seq , 0, 0), &cc_set, false))
1250 return delay_list;
1251 }
1252
1253 if (XVECLEN (seq, 0) - 1 > slots_remaining
1254 || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
1255 || ! single_set (XVECEXP (seq, 0, 0)))
1256 return delay_list;
1257
1258 #ifdef MD_CAN_REDIRECT_BRANCH
1259 /* On some targets, branches with delay slots can have a limited
1260 displacement. Give the back end a chance to tell us we can't do
1261 this. */
1262 if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0)))
1263 return delay_list;
1264 #endif
1265
1266 for (i = 1; i < XVECLEN (seq, 0); i++)
1267 {
1268 rtx trial = XVECEXP (seq, 0, i);
1269 int flags;
1270
1271 if (insn_references_resource_p (trial, sets, false)
1272 || insn_sets_resource_p (trial, needed, false)
1273 || insn_sets_resource_p (trial, sets, false)
1274 #ifdef HAVE_cc0
1275 /* If TRIAL sets CC0, we can't copy it, so we can't steal this
1276 delay list. */
1277 || find_reg_note (trial, REG_CC_USER, NULL_RTX)
1278 #endif
1279 /* If TRIAL is from the fallthrough code of an annulled branch insn
1280 in SEQ, we cannot use it. */
1281 || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
1282 && ! INSN_FROM_TARGET_P (trial)))
1283 return delay_list;
1284
1285 /* If this insn was already done (usually in a previous delay slot),
1286 pretend we put it in our delay slot. */
1287 if (redundant_insn (trial, insn, new_delay_list))
1288 continue;
1289
1290 /* We will end up re-vectoring this branch, so compute flags
1291 based on jumping to the new label. */
1292 flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
1293
1294 if (! must_annul
1295 && ((condition == const_true_rtx
1296 || (! insn_sets_resource_p (trial, other_needed, false)
1297 && ! may_trap_or_fault_p (PATTERN (trial)))))
1298 ? eligible_for_delay (insn, total_slots_filled, trial, flags)
1299 : (must_annul || (delay_list == NULL && new_delay_list == NULL))
1300 && (must_annul = 1,
1301 check_annul_list_true_false (0, delay_list)
1302 && check_annul_list_true_false (0, new_delay_list)
1303 && eligible_for_annul_false (insn, total_slots_filled,
1304 trial, flags)))
1305 {
1306 if (must_annul)
1307 used_annul = 1;
1308 temp = copy_rtx (trial);
1309 INSN_FROM_TARGET_P (temp) = 1;
1310 new_delay_list = add_to_delay_list (temp, new_delay_list);
1311 total_slots_filled++;
1312
1313 if (--slots_remaining == 0)
1314 break;
1315 }
1316 else
1317 return delay_list;
1318 }
1319
1320 /* Show the place to which we will be branching. */
1321 *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
1322
1323 /* Add any new insns to the delay list and update the count of the
1324 number of slots filled. */
1325 *pslots_filled = total_slots_filled;
1326 if (used_annul)
1327 *pannul_p = 1;
1328
1329 if (delay_list == 0)
1330 return new_delay_list;
1331
1332 for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
1333 delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
1334
1335 return delay_list;
1336 }
1337 \f
1338 /* Similar to steal_delay_list_from_target except that SEQ is on the
1339 fallthrough path of INSN. Here we only do something if the delay insn
1340 of SEQ is an unconditional branch. In that case we steal its delay slot
1341 for INSN since unconditional branches are much easier to fill. */
1342
1343 static rtx
1344 steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq,
1345 rtx delay_list, struct resources *sets,
1346 struct resources *needed,
1347 struct resources *other_needed,
1348 int slots_to_fill, int *pslots_filled,
1349 int *pannul_p)
1350 {
1351 int i;
1352 int flags;
1353 int must_annul = *pannul_p;
1354 int used_annul = 0;
1355
1356 flags = get_jump_flags (insn, JUMP_LABEL (insn));
1357
1358 /* We can't do anything if SEQ's delay insn isn't an
1359 unconditional branch. */
1360
1361 if (! simplejump_p (XVECEXP (seq, 0, 0))
1362 && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
1363 return delay_list;
1364
1365 for (i = 1; i < XVECLEN (seq, 0); i++)
1366 {
1367 rtx trial = XVECEXP (seq, 0, i);
1368
1369 /* If TRIAL sets CC0, stealing it will move it too far from the use
1370 of CC0. */
1371 if (insn_references_resource_p (trial, sets, false)
1372 || insn_sets_resource_p (trial, needed, false)
1373 || insn_sets_resource_p (trial, sets, false)
1374 #ifdef HAVE_cc0
1375 || sets_cc0_p (PATTERN (trial))
1376 #endif
1377 )
1378
1379 break;
1380
1381 /* If this insn was already done, we don't need it. */
1382 if (redundant_insn (trial, insn, delay_list))
1383 {
1384 delete_from_delay_slot (trial);
1385 continue;
1386 }
1387
1388 if (! must_annul
1389 && ((condition == const_true_rtx
1390 || (! insn_sets_resource_p (trial, other_needed, false)
1391 && ! may_trap_or_fault_p (PATTERN (trial)))))
1392 ? eligible_for_delay (insn, *pslots_filled, trial, flags)
1393 : (must_annul || delay_list == NULL) && (must_annul = 1,
1394 check_annul_list_true_false (1, delay_list)
1395 && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
1396 {
1397 if (must_annul)
1398 used_annul = 1;
1399 delete_from_delay_slot (trial);
1400 delay_list = add_to_delay_list (trial, delay_list);
1401
1402 if (++(*pslots_filled) == slots_to_fill)
1403 break;
1404 }
1405 else
1406 break;
1407 }
1408
1409 if (used_annul)
1410 *pannul_p = 1;
1411 return delay_list;
1412 }
1413 \f
1414 /* Try merging insns starting at THREAD which match exactly the insns in
1415 INSN's delay list.
1416
1417 If all insns were matched and the insn was previously annulling, the
1418 annul bit will be cleared.
1419
1420 For each insn that is merged, if the branch is or will be non-annulling,
1421 we delete the merged insn. */
1422
1423 static void
1424 try_merge_delay_insns (rtx insn, rtx thread)
1425 {
1426 rtx trial, next_trial;
1427 rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
1428 int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
1429 int slot_number = 1;
1430 int num_slots = XVECLEN (PATTERN (insn), 0);
1431 rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1432 struct resources set, needed;
1433 rtx merged_insns = 0;
1434 int i;
1435 int flags;
1436
1437 flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));
1438
1439 CLEAR_RESOURCE (&needed);
1440 CLEAR_RESOURCE (&set);
1441
1442 /* If this is not an annulling branch, take into account anything needed in
1443 INSN's delay slot. This prevents two increments from being incorrectly
1444 folded into one. If we are annulling, this would be the correct
1445 thing to do. (The alternative, looking at things set in NEXT_TO_MATCH,
1446 will essentially disable this optimization. This method is somewhat of
1447 a kludge, but I don't see a better way.) */
1448 if (! annul_p)
1449 for (i = 1 ; i < num_slots; i++)
1450 if (XVECEXP (PATTERN (insn), 0, i))
1451 mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed,
1452 true);
1453
1454 for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
1455 {
1456 rtx pat = PATTERN (trial);
1457 rtx oldtrial = trial;
1458
1459 next_trial = next_nonnote_insn (trial);
1460
1461 /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */
1462 if (NONJUMP_INSN_P (trial)
1463 && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
1464 continue;
1465
1466 if (GET_CODE (next_to_match) == GET_CODE (trial)
1467 #ifdef HAVE_cc0
1468 /* We can't share an insn that sets cc0. */
1469 && ! sets_cc0_p (pat)
1470 #endif
1471 && ! insn_references_resource_p (trial, &set, true)
1472 && ! insn_sets_resource_p (trial, &set, true)
1473 && ! insn_sets_resource_p (trial, &needed, true)
1474 && (trial = try_split (pat, trial, 0)) != 0
1475 /* Update next_trial, in case try_split succeeded. */
1476 && (next_trial = next_nonnote_insn (trial))
1477 /* Likewise THREAD. */
1478 && (thread = oldtrial == thread ? trial : thread)
1479 && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
1480 /* Have to test this condition if annul condition is different
1481 from (and less restrictive than) non-annulling one. */
1482 && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
1483 {
1484
1485 if (! annul_p)
1486 {
1487 update_block (trial, thread);
1488 if (trial == thread)
1489 thread = next_active_insn (thread);
1490
1491 delete_related_insns (trial);
1492 INSN_FROM_TARGET_P (next_to_match) = 0;
1493 }
1494 else
1495 merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns);
1496
1497 if (++slot_number == num_slots)
1498 break;
1499
1500 next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1501 }
1502
1503 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
1504 mark_referenced_resources (trial, &needed, true);
1505 }
1506
1507 /* See if we stopped on a filled insn. If we did, try to see if its
1508 delay slots match. */
1509 if (slot_number != num_slots
1510 && trial && NONJUMP_INSN_P (trial)
1511 && GET_CODE (PATTERN (trial)) == SEQUENCE
1512 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
1513 {
1514 rtx pat = PATTERN (trial);
1515 rtx filled_insn = XVECEXP (pat, 0, 0);
1516
1517 /* Account for resources set/needed by the filled insn. */
1518 mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
1519 mark_referenced_resources (filled_insn, &needed, true);
1520
1521 for (i = 1; i < XVECLEN (pat, 0); i++)
1522 {
1523 rtx dtrial = XVECEXP (pat, 0, i);
1524
1525 if (! insn_references_resource_p (dtrial, &set, true)
1526 && ! insn_sets_resource_p (dtrial, &set, true)
1527 && ! insn_sets_resource_p (dtrial, &needed, true)
1528 #ifdef HAVE_cc0
1529 && ! sets_cc0_p (PATTERN (dtrial))
1530 #endif
1531 && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
1532 && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
1533 {
1534 if (! annul_p)
1535 {
1536 rtx new_rtx;
1537
1538 update_block (dtrial, thread);
1539 new_rtx = delete_from_delay_slot (dtrial);
1540 if (INSN_DELETED_P (thread))
1541 thread = new_rtx;
1542 INSN_FROM_TARGET_P (next_to_match) = 0;
1543 }
1544 else
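	  /* SImode (in contrast to the VOIDmode entries made for trials
	     taken from the main thread above) marks DTRIAL as coming from
	     a delay slot, so the cleanup loop at the end of this function
	     deletes it with delete_from_delay_slot.  */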
1545 merged_insns = gen_rtx_INSN_LIST (SImode, dtrial,
1546 merged_insns);
1547
1548 if (++slot_number == num_slots)
1549 break;
1550
1551 next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1552 }
1553 else
1554 {
1555 /* Keep track of the set/referenced resources for the delay
1556 slots of any trial insns we encounter. */
1557 mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL);
1558 mark_referenced_resources (dtrial, &needed, true);
1559 }
1560 }
1561 }
1562
1563 /* If all insns in the delay slot have been matched and we were previously
1564 annulling the branch, we need not do so any more. In that case delete all the
1565 merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in
1566 the delay list so that we know that it isn't only being used at the
1567 target. */
1568 if (slot_number == num_slots && annul_p)
1569 {
1570 for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
1571 {
1572 if (GET_MODE (merged_insns) == SImode)
1573 {
1574 rtx new_rtx;
1575
1576 update_block (XEXP (merged_insns, 0), thread);
1577 new_rtx = delete_from_delay_slot (XEXP (merged_insns, 0));
1578 if (INSN_DELETED_P (thread))
1579 thread = new_rtx;
1580 }
1581 else
1582 {
1583 update_block (XEXP (merged_insns, 0), thread);
1584 delete_related_insns (XEXP (merged_insns, 0));
1585 }
1586 }
1587
1588 INSN_ANNULLED_BRANCH_P (delay_insn) = 0;
1589
1590 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1591 INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
1592 }
1593 }
1594 \f
1595 /* See if INSN is redundant with an insn in front of TARGET. Often this
1596 is called when INSN is a candidate for a delay slot of TARGET.
1597 DELAY_LIST are insns that will be placed in delay slots of TARGET in front
1598 of INSN. Often INSN will be redundant with an insn in a delay slot of
1599 some previous insn. This happens when we have a series of branches to the
1600 same label; in that case the first insn at the target might want to go
1601 into each of the delay slots.
1602
1603 If we are not careful, this routine can take up a significant fraction
1604 of the total compilation time (4%), but only wins rarely. Hence we
1605 speed this routine up by making two passes. The first pass goes back
1606 until it hits a label and sees if it finds an insn with an identical
1607 pattern. Only in this (relatively rare) event does it check for
1608 data conflicts.
1609
1610 We do not split insns we encounter. This could cause us not to find a
1611 redundant insn, but the cost of splitting seems greater than the possible
1612 gain in rare cases. */
1613
1614 static rtx
1615 redundant_insn (rtx insn, rtx target, rtx delay_list)
1616 {
1617 rtx target_main = target;
1618 rtx ipat = PATTERN (insn);
1619 rtx trial, pat;
1620 struct resources needed, set;
1621 int i;
1622 unsigned insns_to_search;
1623
1624 /* If INSN has any REG_UNUSED notes, it can't match anything since we
1625 are allowed to not actually assign to such a register. */
1626 if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
1627 return 0;
1628
1629 /* Scan backwards looking for a match. */
1630 for (trial = PREV_INSN (target),
1631 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1632 trial && insns_to_search > 0;
1633 trial = PREV_INSN (trial))
1634 {
1635 if (LABEL_P (trial))
1636 return 0;
1637
1638 if (!NONDEBUG_INSN_P (trial))
1639 continue;
1640 --insns_to_search;
1641
1642 pat = PATTERN (trial);
1643 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1644 continue;
1645
1646 if (GET_CODE (pat) == SEQUENCE)
1647 {
1648 /* Stop for a CALL and its delay slots because it is difficult to
1649 track its resource needs correctly. */
1650 if (CALL_P (XVECEXP (pat, 0, 0)))
1651 return 0;
1652
1653 /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
1654 slots because it is difficult to track its resource needs
1655 correctly. */
1656
1657 #ifdef INSN_SETS_ARE_DELAYED
1658 if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1659 return 0;
1660 #endif
1661
1662 #ifdef INSN_REFERENCES_ARE_DELAYED
1663 if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1664 return 0;
1665 #endif
1666
1667 /* See if any of the insns in the delay slot match, updating
1668 resource requirements as we go. */
1669 for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
1670 if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
1671 && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
1672 && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
1673 break;
1674
1675 /* If found a match, exit this loop early. */
1676 if (i > 0)
1677 break;
1678 }
1679
1680 else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
1681 && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
1682 break;
1683 }
1684
1685 /* If we didn't find an insn that matches, return 0. */
1686 if (trial == 0)
1687 return 0;
1688
1689 /* See what resources this insn sets and needs. If they overlap, or
1690 if this insn references CC0, it can't be redundant. */
1691
1692 CLEAR_RESOURCE (&needed);
1693 CLEAR_RESOURCE (&set);
1694 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
1695 mark_referenced_resources (insn, &needed, true);
1696
1697 /* If TARGET is a SEQUENCE, get the main insn. */
1698 if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1699 target_main = XVECEXP (PATTERN (target), 0, 0);
1700
1701 if (resource_conflicts_p (&needed, &set)
1702 #ifdef HAVE_cc0
1703 || reg_mentioned_p (cc0_rtx, ipat)
1704 #endif
1705 /* The insn requiring the delay may not set anything needed or set by
1706 INSN. */
1707 || insn_sets_resource_p (target_main, &needed, true)
1708 || insn_sets_resource_p (target_main, &set, true))
1709 return 0;
1710
1711 /* Insns we pass may not set either NEEDED or SET, so merge them for
1712 simpler tests. */
1713 needed.memory |= set.memory;
1714 needed.unch_memory |= set.unch_memory;
1715 IOR_HARD_REG_SET (needed.regs, set.regs);
1716
1717 /* This insn isn't redundant if it conflicts with an insn that either is
1718 or will be in a delay slot of TARGET. */
1719
1720 while (delay_list)
1721 {
1722 if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, true))
1723 return 0;
1724 delay_list = XEXP (delay_list, 1);
1725 }
1726
1727 if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1728 for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
1729 if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed,
1730 true))
1731 return 0;
1732
1733 /* Scan backwards until we reach a label or an insn that uses something
1734 INSN sets or sets something INSN uses or sets. */
1735
1736 for (trial = PREV_INSN (target),
1737 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1738 trial && !LABEL_P (trial) && insns_to_search > 0;
1739 trial = PREV_INSN (trial))
1740 {
1741 if (!NONDEBUG_INSN_P (trial))
1742 continue;
1743 --insns_to_search;
1744
1745 pat = PATTERN (trial);
1746 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1747 continue;
1748
1749 if (GET_CODE (pat) == SEQUENCE)
1750 {
1751 /* If this is a CALL_INSN and its delay slots, it is hard to track
1752 the resource needs properly, so give up. */
1753 if (CALL_P (XVECEXP (pat, 0, 0)))
1754 return 0;
1755
1756 /* If this is an INSN or JUMP_INSN with delayed effects, it
1757 is hard to track the resource needs properly, so give up. */
1758
1759 #ifdef INSN_SETS_ARE_DELAYED
1760 if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1761 return 0;
1762 #endif
1763
1764 #ifdef INSN_REFERENCES_ARE_DELAYED
1765 if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
1766 return 0;
1767 #endif
1768
1769 /* See if any of the insns in the delay slot match, updating
1770 resource requirements as we go. */
1771 for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
1772 {
1773 rtx candidate = XVECEXP (pat, 0, i);
1774
1775 /* If an insn will be annulled if the branch is false, it isn't
1776 considered as a possible duplicate insn. */
1777 if (rtx_equal_p (PATTERN (candidate), ipat)
1778 && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
1779 && INSN_FROM_TARGET_P (candidate)))
1780 {
1781 /* Show that this insn will be used in the sequel. */
1782 INSN_FROM_TARGET_P (candidate) = 0;
1783 return candidate;
1784 }
1785
1786 /* Unless this is an annulled insn from the target of a branch,
1787 we must stop if it sets anything needed or set by INSN. */
1788 if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
1789 || ! INSN_FROM_TARGET_P (candidate))
1790 && insn_sets_resource_p (candidate, &needed, true))
1791 return 0;
1792 }
1793
1794 /* If the insn requiring the delay slot conflicts with INSN, we
1795 must stop. */
1796 if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, true))
1797 return 0;
1798 }
1799 else
1800 {
1801 /* See if TRIAL is the same as INSN. */
1802 pat = PATTERN (trial);
1803 if (rtx_equal_p (pat, ipat))
1804 return trial;
1805
1806 /* Can't go any further if TRIAL conflicts with INSN. */
1807 if (insn_sets_resource_p (trial, &needed, true))
1808 return 0;
1809 }
1810 }
1811
1812 return 0;
1813 }
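
/* For reference, the identity test applied at both `break' sites in the
   first pass above can be read in isolation; this is a sketch with a
   hypothetical name, not a helper the pass actually calls.  */

static int
pass_one_match_sketch (rtx trial, rtx insn, rtx ipat)
{
  /* Same rtx code, structurally equal patterns, and no REG_UNUSED note
     that would let TRIAL skip computing one of its outputs.  */
  return (GET_CODE (trial) == GET_CODE (insn)
	  && rtx_equal_p (PATTERN (trial), ipat)
	  && ! find_reg_note (trial, REG_UNUSED, NULL_RTX));
}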
1814 \f
1815 /* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero,
1816 it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
1817 is nonzero, we are allowed to fall into this thread; otherwise, we are
1818 not.
1819
1820 If LABEL is used more than once or we pass a label other than LABEL before
1821 finding an active insn, we do not own this thread. */
1822
1823 static int
1824 own_thread_p (rtx thread, rtx label, int allow_fallthrough)
1825 {
1826 rtx active_insn;
1827 rtx insn;
1828
1829 /* We don't own the function end. */
1830 if (thread == 0)
1831 return 0;
1832
1833 /* Get the first active insn, or THREAD, if it is an active insn. */
1834 active_insn = next_active_insn (PREV_INSN (thread));
1835
1836 for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
1837 if (LABEL_P (insn)
1838 && (insn != label || LABEL_NUSES (insn) != 1))
1839 return 0;
1840
1841 if (allow_fallthrough)
1842 return 1;
1843
1844 /* Ensure that we reach a BARRIER before any insn or label. */
1845 for (insn = prev_nonnote_insn (thread);
1846 insn == 0 || !BARRIER_P (insn);
1847 insn = prev_nonnote_insn (insn))
1848 if (insn == 0
1849 || LABEL_P (insn)
1850 || (NONJUMP_INSN_P (insn)
1851 && GET_CODE (PATTERN (insn)) != USE
1852 && GET_CODE (PATTERN (insn)) != CLOBBER))
1853 return 0;
1854
1855 return 1;
1856 }
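
/* Usage sketch for own_thread_p, mirroring the call made later in
   fill_simple_delay_slots: a jump owns its target thread when the target
   label is reached only through this jump and we are not allowed to fall
   into it.  The wrapper name is hypothetical.  */

static int
own_target_sketch (rtx jump)
{
  return own_thread_p (JUMP_LABEL (jump), JUMP_LABEL (jump), 0);
}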
1857 \f
1858 /* Called when INSN is being moved from a location near the target of a jump.
1859 We leave a marker of the form (use (INSN)) immediately in front
1860 of WHERE for mark_target_live_regs. These markers will be deleted when
1861 reorg finishes.
1862
1863 We used to try to update the live status of registers if WHERE is at
1864 the start of a basic block, but that can't work since we may remove a
1865 BARRIER in relax_delay_slots. */
1866
1867 static void
1868 update_block (rtx insn, rtx where)
1869 {
1870 /* Ignore if this was in a delay slot and it came from the target of
1871 a branch. */
1872 if (INSN_FROM_TARGET_P (insn))
1873 return;
1874
1875 emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
1876
1877 /* INSN might be making a value live in a block where it didn't use to
1878 be. So recompute liveness information for this block. */
1879
1880 incr_ticks_for_insn (insn);
1881 }
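
/* Shape of the marker emitted above, shown as a sketch: the USE's operand
   is the moved insn itself rather than a register, which (by assumption
   here) is how mark_target_live_regs tells these markers apart from
   ordinary USEs.  */

static int
is_reorg_use_marker_sketch (rtx x)
{
  return (NONJUMP_INSN_P (x)
	  && GET_CODE (PATTERN (x)) == USE
	  && INSN_P (XEXP (PATTERN (x), 0)));
}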
1882
1883 /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
1884 the basic block containing the jump. */
1885
1886 static int
1887 reorg_redirect_jump (rtx jump, rtx nlabel)
1888 {
1889 incr_ticks_for_insn (jump);
1890 return redirect_jump (jump, nlabel, 1);
1891 }
1892
1893 /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
1894 We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
1895 that reference values used in INSN. If we find one, then we move the
1896 REG_DEAD note to INSN.
1897
1898 This is needed to handle the case where a later insn (after INSN) has a
1899 REG_DEAD note for a register used by INSN, and this later insn subsequently
1900 gets moved before a CODE_LABEL because it is a redundant insn. In this
1901 case, mark_target_live_regs may be confused into thinking the register
1902 is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
1903
1904 static void
1905 update_reg_dead_notes (rtx insn, rtx delayed_insn)
1906 {
1907 rtx p, link, next;
1908
1909 for (p = next_nonnote_insn (insn); p != delayed_insn;
1910 p = next_nonnote_insn (p))
1911 for (link = REG_NOTES (p); link; link = next)
1912 {
1913 next = XEXP (link, 1);
1914
1915 if (REG_NOTE_KIND (link) != REG_DEAD
1916 || !REG_P (XEXP (link, 0)))
1917 continue;
1918
1919 if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
1920 {
1921 /* Move the REG_DEAD note from P to INSN. */
1922 remove_note (p, link);
1923 XEXP (link, 1) = REG_NOTES (insn);
1924 REG_NOTES (insn) = link;
1925 }
1926 }
1927 }
1928
1929 /* Called when an insn redundant with start_insn is deleted. If there
1930 is a REG_DEAD note for the target of start_insn between start_insn
1931 and stop_insn, then the REG_DEAD note needs to be deleted since the
1932 value no longer dies there.
1933
1934 If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
1935 confused into thinking the register is dead. */
1936
1937 static void
1938 fix_reg_dead_note (rtx start_insn, rtx stop_insn)
1939 {
1940 rtx p, link, next;
1941
1942 for (p = next_nonnote_insn (start_insn); p != stop_insn;
1943 p = next_nonnote_insn (p))
1944 for (link = REG_NOTES (p); link; link = next)
1945 {
1946 next = XEXP (link, 1);
1947
1948 if (REG_NOTE_KIND (link) != REG_DEAD
1949 || !REG_P (XEXP (link, 0)))
1950 continue;
1951
1952 if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
1953 {
1954 remove_note (p, link);
1955 return;
1956 }
1957 }
1958 }
1959
1960 /* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
1961
1962 This handles the case of udivmodXi4 instructions which optimize their
1963 output depending on whether any REG_UNUSED notes are present.
1964 We must make sure that INSN calculates as many results as REDUNDANT_INSN
1965 does. */
1966
1967 static void
1968 update_reg_unused_notes (rtx insn, rtx redundant_insn)
1969 {
1970 rtx link, next;
1971
1972 for (link = REG_NOTES (insn); link; link = next)
1973 {
1974 next = XEXP (link, 1);
1975
1976 if (REG_NOTE_KIND (link) != REG_UNUSED
1977 || !REG_P (XEXP (link, 0)))
1978 continue;
1979
1980 if (! find_regno_note (redundant_insn, REG_UNUSED,
1981 REGNO (XEXP (link, 0))))
1982 remove_note (insn, link);
1983 }
1984 }
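
/* A concrete instance of the problem described above, with assumed
   operands: a udivmodsi4 computes both quotient and remainder.  If INSN
   carries (REG_UNUSED quotient-reg) while REDUNDANT_INSN has no such
   note, INSN could later be shrunk to a remainder-only form and would no
   longer compute everything REDUNDANT_INSN did.  The per-note test used
   above, as a standalone sketch:  */

static int
note_kept_sketch (rtx other, rtx note)
{
  /* Keep the REG_UNUSED note only if OTHER also has one for the same
     register number.  */
  return find_regno_note (other, REG_UNUSED,
			  REGNO (XEXP (note, 0))) != 0;
}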
1985 \f
1986 /* Return the label before INSN, or put a new label there. */
1987
1988 static rtx
1989 get_label_before (rtx insn)
1990 {
1991 rtx label;
1992
1993 /* Find an existing label at this point
1994 or make a new one if there is none. */
1995 label = prev_nonnote_insn (insn);
1996
1997 if (label == 0 || !LABEL_P (label))
1998 {
1999 rtx prev = PREV_INSN (insn);
2000
2001 label = gen_label_rtx ();
2002 emit_label_after (label, prev);
2003 LABEL_NUSES (label) = 0;
2004 }
2005 return label;
2006 }
2007
2008 /* Scan a function looking for insns that need a delay slot and find insns to
2009 put into the delay slot.
2010
2011 NON_JUMPS_P is nonzero if we are to only try to fill non-jump insns (such
2012 as calls). We do these first since we don't want jump insns (that are
2013 easier to fill) to get the only insns that could be used for non-jump insns.
2014 When it is zero, only try to fill JUMP_INSNs.
2015
2016 When slots are filled in this manner, the insns (including the
2017 delay_insn) are put together in a SEQUENCE rtx. In this fashion,
2018 it is possible to tell whether a delay slot has really been filled
2019 or not. `final' knows how to deal with this, by communicating
2020 through FINAL_SEQUENCE. */
2021
2022 static void
2023 fill_simple_delay_slots (int non_jumps_p)
2024 {
2025 rtx insn, pat, trial, next_trial;
2026 int i;
2027 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
2028 struct resources needed, set;
2029 int slots_to_fill, slots_filled;
2030 rtx delay_list;
2031
2032 for (i = 0; i < num_unfilled_slots; i++)
2033 {
2034 int flags;
2035 /* Get the next insn to fill. If it has already had any slots assigned,
2036 we can't do anything with it. Maybe we'll improve this later. */
2037
2038 insn = unfilled_slots_base[i];
2039 if (insn == 0
2040 || INSN_DELETED_P (insn)
2041 || (NONJUMP_INSN_P (insn)
2042 && GET_CODE (PATTERN (insn)) == SEQUENCE)
2043 || (JUMP_P (insn) && non_jumps_p)
2044 || (!JUMP_P (insn) && ! non_jumps_p))
2045 continue;
2046
2047 /* It may have been that this insn used to need delay slots, but
2048 now doesn't; ignore in that case. This can happen, for example,
2049 on the HP PA RISC, where the number of delay slots depends on
2050 what insns are nearby. */
2051 slots_to_fill = num_delay_slots (insn);
2052
2053 /* Some machine descriptions have defined instructions to have
2054 delay slots only in certain circumstances which may depend on
2055 nearby insns (which change due to reorg's actions).
2056
2057 For example, the PA port normally has delay slots for unconditional
2058 jumps.
2059
2060 However, the PA port claims such jumps do not have a delay slot
2061 if they are immediate successors of certain CALL_INSNs. This
2062 allows the port to favor filling the delay slot of the call with
2063 the unconditional jump. */
2064 if (slots_to_fill == 0)
2065 continue;
2066
2067 /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
2068 says how many. After initialization, first try optimizing
2069
2070 call _foo call _foo
2071 nop add %o7,.-L1,%o7
2072 b,a L1
2073 nop
2074
2075 If this case applies, the delay slot of the call is filled with
2076 the unconditional jump. This is done first to avoid having the
2077 delay slot of the call filled in the backward scan. Also, since
2078 the unconditional jump is likely to also have a delay slot, that
2079 insn must exist when it is subsequently scanned.
2080
2081 This is tried on each insn with delay slots as some machines
2082 have insns which perform calls, but are not represented as
2083 CALL_INSNs. */
2084
2085 slots_filled = 0;
2086 delay_list = 0;
2087
2088 if (JUMP_P (insn))
2089 flags = get_jump_flags (insn, JUMP_LABEL (insn));
2090 else
2091 flags = get_jump_flags (insn, NULL_RTX);
2092
2093 if ((trial = next_active_insn (insn))
2094 && JUMP_P (trial)
2095 && simplejump_p (trial)
2096 && eligible_for_delay (insn, slots_filled, trial, flags)
2097 && no_labels_between_p (insn, trial)
2098 && ! can_throw_internal (trial))
2099 {
2100 rtx *tmp;
2101 slots_filled++;
2102 delay_list = add_to_delay_list (trial, delay_list);
2103
2104 /* TRIAL may have had its delay slot filled, then unfilled. When
2105 the delay slot is unfilled, TRIAL is placed back on the unfilled
2106 slots obstack. Unfortunately, it is placed on the end of the
2107 obstack, not in its original location. Therefore, we must search
2108 from entry i + 1 to the end of the unfilled slots obstack to
2109 try and find TRIAL. */
2110 tmp = &unfilled_slots_base[i + 1];
2111 while (*tmp != trial && tmp != unfilled_slots_next)
2112 tmp++;
2113
2114 /* Remove the unconditional jump from consideration for delay slot
2115 filling and unthread it. */
2116 if (*tmp == trial)
2117 *tmp = 0;
2118 {
2119 rtx next = NEXT_INSN (trial);
2120 rtx prev = PREV_INSN (trial);
2121 if (prev)
2122 NEXT_INSN (prev) = next;
2123 if (next)
2124 PREV_INSN (next) = prev;
2125 }
2126 }
2127
2128 /* Now, scan backwards from the insn to search for a potential
2129 delay-slot candidate. Stop searching when a label or jump is hit.
2130
2131 For each candidate, if it is to go into the delay slot (moved
2132 forward in execution sequence), it must not need or set any resources
2133 that were set by later insns and must not set any resources that
2134 are needed for those insns.
2135
2136 The delay slot insn itself sets resources unless it is a call
2137 (in which case the called routine, not the insn itself, is doing
2138 the setting). */
2139
2140 if (slots_filled < slots_to_fill)
2141 {
2142 CLEAR_RESOURCE (&needed);
2143 CLEAR_RESOURCE (&set);
2144 mark_set_resources (insn, &set, 0, MARK_SRC_DEST);
2145 mark_referenced_resources (insn, &needed, false);
2146
2147 for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
2148 trial = next_trial)
2149 {
2150 next_trial = prev_nonnote_insn (trial);
2151
2152 /* This must be an INSN or CALL_INSN. */
2153 pat = PATTERN (trial);
2154
2155 /* USE and CLOBBER at this level are just for flow; ignore them. */
2156 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2157 continue;
2158
2159 /* Check for resource conflict first, to avoid unnecessary
2160 splitting. */
2161 if (! insn_references_resource_p (trial, &set, true)
2162 && ! insn_sets_resource_p (trial, &set, true)
2163 && ! insn_sets_resource_p (trial, &needed, true)
2164 #ifdef HAVE_cc0
2165 /* Can't separate set of cc0 from its use. */
2166 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2167 #endif
2168 && ! can_throw_internal (trial))
2169 {
2170 trial = try_split (pat, trial, 1);
2171 next_trial = prev_nonnote_insn (trial);
2172 if (eligible_for_delay (insn, slots_filled, trial, flags))
2173 {
2174 /* In this case, we are searching backward, so if we
2175 find insns to put on the delay list, we want
2176 to put them at the head, rather than the
2177 tail, of the list. */
2178
2179 update_reg_dead_notes (trial, insn);
2180 delay_list = gen_rtx_INSN_LIST (VOIDmode,
2181 trial, delay_list);
2182 update_block (trial, trial);
2183 delete_related_insns (trial);
2184 if (slots_to_fill == ++slots_filled)
2185 break;
2186 continue;
2187 }
2188 }
2189
2190 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2191 mark_referenced_resources (trial, &needed, true);
2192 }
2193 }
2194
2195 /* If all needed slots haven't been filled, we come here. */
2196
2197 /* Try to optimize case of jumping around a single insn. */
2198 #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
2199 if (slots_filled != slots_to_fill
2200 && delay_list == 0
2201 && JUMP_P (insn)
2202 && (condjump_p (insn) || condjump_in_parallel_p (insn)))
2203 {
2204 delay_list = optimize_skip (insn);
2205 if (delay_list)
2206 slots_filled += 1;
2207 }
2208 #endif
2209
2210 /* Try to get insns from beyond the insn needing the delay slot.
2211 These insns can neither set nor reference resources set in insns being
2212 skipped, cannot set resources in the insn being skipped, and, if this
2213 is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
2214 call might not return).
2215
2216 There used to be code which continued past the target label if
2217 we saw all uses of the target label. This code did not work,
2218 because it failed to account for some instructions which were
2219 both annulled and marked as from the target. This can happen as a
2220 result of optimize_skip. Since this code was redundant with
2221 fill_eager_delay_slots anyway, it was just deleted. */
2222
2223 if (slots_filled != slots_to_fill
2224 /* If this instruction could throw an exception which is
2225 caught in the same function, then it's not safe to fill
2226 the delay slot with an instruction from beyond this
2227 point. For example, consider:
2228
2229 int i = 2;
2230
2231 try {
2232 f();
2233 i = 3;
2234 } catch (...) {}
2235
2236 return i;
2237
2238 Even though `i' is a local variable, we must be sure not
2239 to put `i = 3' in the delay slot if `f' might throw an
2240 exception.
2241
2242 Presumably, we should also check to see if we could get
2243 back to this function via `setjmp'. */
2244 && ! can_throw_internal (insn)
2245 && (!JUMP_P (insn)
2246 || ((condjump_p (insn) || condjump_in_parallel_p (insn))
2247 && ! simplejump_p (insn)
2248 && JUMP_LABEL (insn) != 0)))
2249 {
2250 /* Invariant: TARGET is the insn's jump label if INSN is a
2251 JUMP_INSN; otherwise, zero. */
2252 rtx target = 0;
2253 int maybe_never = 0;
2254 rtx pat, trial_delay;
2255
2256 CLEAR_RESOURCE (&needed);
2257 CLEAR_RESOURCE (&set);
2258
2259 if (CALL_P (insn))
2260 {
2261 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2262 mark_referenced_resources (insn, &needed, true);
2263 maybe_never = 1;
2264 }
2265 else
2266 {
2267 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2268 mark_referenced_resources (insn, &needed, true);
2269 if (JUMP_P (insn))
2270 target = JUMP_LABEL (insn);
2271 }
2272
2273 if (target == 0)
2274 for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
2275 {
2276 next_trial = next_nonnote_insn (trial);
2277
2278 if (LABEL_P (trial)
2279 || BARRIER_P (trial))
2280 break;
2281
2282 /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
2283 pat = PATTERN (trial);
2284
2285 /* Stand-alone USE and CLOBBER are just for flow. */
2286 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2287 continue;
2288
2289 /* If this already has filled delay slots, get the insn needing
2290 the delay slots. */
2291 if (GET_CODE (pat) == SEQUENCE)
2292 trial_delay = XVECEXP (pat, 0, 0);
2293 else
2294 trial_delay = trial;
2295
2296 /* Stop our search when seeing an unconditional jump. */
2297 if (JUMP_P (trial_delay))
2298 break;
2299
2300 /* See if we have a resource problem before we try to
2301 split. */
2302 if (GET_CODE (pat) != SEQUENCE
2303 && ! insn_references_resource_p (trial, &set, true)
2304 && ! insn_sets_resource_p (trial, &set, true)
2305 && ! insn_sets_resource_p (trial, &needed, true)
2306 #ifdef HAVE_cc0
2307 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
2308 #endif
2309 && ! (maybe_never && may_trap_or_fault_p (pat))
2310 && (trial = try_split (pat, trial, 0))
2311 && eligible_for_delay (insn, slots_filled, trial, flags)
2312 && ! can_throw_internal (trial))
2313 {
2314 next_trial = next_nonnote_insn (trial);
2315 delay_list = add_to_delay_list (trial, delay_list);
2316
2317 #ifdef HAVE_cc0
2318 if (reg_mentioned_p (cc0_rtx, pat))
2319 link_cc0_insns (trial);
2320 #endif
2321
2322 delete_related_insns (trial);
2323 if (slots_to_fill == ++slots_filled)
2324 break;
2325 continue;
2326 }
2327
2328 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2329 mark_referenced_resources (trial, &needed, true);
2330
2331 /* Ensure we don't put insns between the setting of cc and the
2332 comparison by moving a setting of cc into an earlier delay
2333 slot since these insns could clobber the condition code. */
2334 set.cc = 1;
2335
2336 /* If this is a call or jump, we might not get here. */
2337 if (CALL_P (trial_delay)
2338 || JUMP_P (trial_delay))
2339 maybe_never = 1;
2340 }
2341
2342 /* If there are slots left to fill and our search was stopped by an
2343 unconditional branch, try the insn at the branch target. We can
2344 redirect the branch if it works.
2345
2346 Don't do this if the insn at the branch target is a branch. */
2347 if (slots_to_fill != slots_filled
2348 && trial
2349 && JUMP_P (trial)
2350 && simplejump_p (trial)
2351 && (target == 0 || JUMP_LABEL (trial) == target)
2352 && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
2353 && ! (NONJUMP_INSN_P (next_trial)
2354 && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
2355 && !JUMP_P (next_trial)
2356 && ! insn_references_resource_p (next_trial, &set, true)
2357 && ! insn_sets_resource_p (next_trial, &set, true)
2358 && ! insn_sets_resource_p (next_trial, &needed, true)
2359 #ifdef HAVE_cc0
2360 && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
2361 #endif
2362 && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
2363 && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
2364 && eligible_for_delay (insn, slots_filled, next_trial, flags)
2365 && ! can_throw_internal (trial))
2366 {
2367 /* See comment in relax_delay_slots about necessity of using
2368 next_real_insn here. */
2369 rtx new_label = next_real_insn (next_trial);
2370
2371 if (new_label != 0)
2372 new_label = get_label_before (new_label);
2373 else
2374 new_label = find_end_label ();
2375
2376 if (new_label)
2377 {
2378 delay_list
2379 = add_to_delay_list (copy_rtx (next_trial), delay_list);
2380 slots_filled++;
2381 reorg_redirect_jump (trial, new_label);
2382
2383 /* If we merged because we both jumped to the same place,
2384 redirect the original insn also. */
2385 if (target)
2386 reorg_redirect_jump (insn, new_label);
2387 }
2388 }
2389 }
2390
2391 /* If this is an unconditional jump, then try to get insns from the
2392 target of the jump. */
2393 if (JUMP_P (insn)
2394 && simplejump_p (insn)
2395 && slots_filled != slots_to_fill)
2396 delay_list
2397 = fill_slots_from_thread (insn, const_true_rtx,
2398 next_active_insn (JUMP_LABEL (insn)),
2399 NULL, 1, 1,
2400 own_thread_p (JUMP_LABEL (insn),
2401 JUMP_LABEL (insn), 0),
2402 slots_to_fill, &slots_filled,
2403 delay_list);
2404
2405 if (delay_list)
2406 unfilled_slots_base[i]
2407 = emit_delay_sequence (insn, delay_list, slots_filled);
2408
2409 if (slots_to_fill == slots_filled)
2410 unfilled_slots_base[i] = 0;
2411
2412 note_delay_statistics (slots_filled, 0);
2413 }
2414
2415 #ifdef DELAY_SLOTS_FOR_EPILOGUE
2416 /* See if the epilogue needs any delay slots. Try to fill them if so.
2417 The only thing we can do is scan backwards from the end of the
2418 function. If we did this in a previous pass, it is incorrect to do it
2419 again. */
2420 if (crtl->epilogue_delay_list)
2421 return;
2422
2423 slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
2424 if (slots_to_fill == 0)
2425 return;
2426
2427 slots_filled = 0;
2428 CLEAR_RESOURCE (&set);
2429
2430 /* The frame pointer and stack pointer are needed at the beginning of
2431 the epilogue, so instructions setting them can not be put in the
2432 epilogue delay slot. However, everything else needed at function
2433 end is safe, so we don't want to use end_of_function_needs here. */
2434 CLEAR_RESOURCE (&needed);
2435 if (frame_pointer_needed)
2436 {
2437 SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM);
2438 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2439 SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM);
2440 #endif
2441 if (! EXIT_IGNORE_STACK
2442 || current_function_sp_is_unchanging)
2443 SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
2444 }
2445 else
2446 SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
2447
2448 #ifdef EPILOGUE_USES
2449 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2450 {
2451 if (EPILOGUE_USES (i))
2452 SET_HARD_REG_BIT (needed.regs, i);
2453 }
2454 #endif
2455
2456 for (trial = get_last_insn (); ! stop_search_p (trial, 1);
2457 trial = PREV_INSN (trial))
2458 {
2459 if (NOTE_P (trial))
2460 continue;
2461 pat = PATTERN (trial);
2462 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2463 continue;
2464
2465 if (! insn_references_resource_p (trial, &set, true)
2466 && ! insn_sets_resource_p (trial, &needed, true)
2467 && ! insn_sets_resource_p (trial, &set, true)
2468 #ifdef HAVE_cc0
2469 /* Don't want to mess with cc0 here. */
2470 && ! reg_mentioned_p (cc0_rtx, pat)
2471 #endif
2472 && ! can_throw_internal (trial))
2473 {
2474 trial = try_split (pat, trial, 1);
2475 if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
2476 {
2477 /* Here as well we are searching backward, so put the
2478 insns we find on the head of the list. */
2479
2480 crtl->epilogue_delay_list
2481 = gen_rtx_INSN_LIST (VOIDmode, trial,
2482 crtl->epilogue_delay_list);
2483 mark_end_of_function_resources (trial, true);
2484 update_block (trial, trial);
2485 delete_related_insns (trial);
2486
2487 /* Clear deleted bit so final.c will output the insn. */
2488 INSN_DELETED_P (trial) = 0;
2489
2490 if (slots_to_fill == ++slots_filled)
2491 break;
2492 continue;
2493 }
2494 }
2495
2496 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2497 mark_referenced_resources (trial, &needed, true);
2498 }
2499
2500 note_delay_statistics (slots_filled, 0);
2501 #endif
2502 }
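
/* Sketch of the filled-slot representation described before
   fill_simple_delay_slots: once slots are filled, the branch and its
   delay insns are wrapped in a SEQUENCE, with the insn needing the slots
   in element 0.  The two helpers below are illustrative only.  */

static int
slot_already_filled_sketch (rtx insn)
{
  return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE;
}

static rtx
insn_needing_slots_sketch (rtx filled)
{
  return XVECEXP (PATTERN (filled), 0, 0);
}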
2503 \f
2504 /* Follow any unconditional jump at LABEL;
2505 return the ultimate label reached by any such chain of jumps.
2506 Return null if the chain ultimately leads to a return instruction.
2507 If LABEL is not followed by a jump, return LABEL.
2508 If the chain loops or we can't find the end, return LABEL,
2509 since that tells caller to avoid changing the insn. */
2510
2511 static rtx
2512 follow_jumps (rtx label)
2513 {
2514 rtx insn;
2515 rtx next;
2516 rtx value = label;
2517 int depth;
2518
2519 for (depth = 0;
2520 (depth < 10
2521 && (insn = next_active_insn (value)) != 0
2522 && JUMP_P (insn)
2523 && ((JUMP_LABEL (insn) != 0 && any_uncondjump_p (insn)
2524 && onlyjump_p (insn))
2525 || GET_CODE (PATTERN (insn)) == RETURN)
2526 && (next = NEXT_INSN (insn))
2527 && BARRIER_P (next));
2528 depth++)
2529 {
2530 rtx tem;
2531
2532 /* If we have found a cycle, make the insn jump to itself. */
2533 if (JUMP_LABEL (insn) == label)
2534 return label;
2535
2536 tem = next_active_insn (JUMP_LABEL (insn));
2537 if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC
2538 || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
2539 break;
2540
2541 value = JUMP_LABEL (insn);
2542 }
2543 if (depth == 10)
2544 return label;
2545 return value;
2546 }
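
/* Typical use of follow_jumps, as in relax_delay_slots further down:
   thread a label through any chain of unconditional jumps (the ten-level
   depth bound above guards against cycles), then step past consecutive
   labels so we redirect to the last label of a group.  The wrapper is a
   sketch, not part of the pass.  */

static rtx
threaded_target_sketch (rtx label)
{
  return skip_consecutive_labels (follow_jumps (label));
}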
2547
2548 /* Try to find insns to place in delay slots.
2549
2550 INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
2551 or is an unconditional branch if CONDITION is const_true_rtx.
2552 *PSLOTS_FILLED is updated with the number of slots that we have filled.
2553
2554 THREAD is a flow of control: either the insns to be executed if the
2555 branch is true or those executed if it is false; THREAD_IF_TRUE says which.
2556
2557 OPPOSITE_THREAD is the thread in the opposite direction. It is used
2558 to see if any potential delay slot insns set things needed there.
2559
2560 LIKELY is nonzero if it is extremely likely that the branch will be
2561 taken and THREAD_IF_TRUE is set. This is used for the branch at the
2562 end of a loop back up to the top.
2563
2564 OWN_THREAD is true if we are the only user of the thread, i.e., it is
2565 the fallthrough code of our jump or the target of the jump when we are
2566 the only jump going there.
2567
2568 If OWN_THREAD is false, it must be the "true" thread of a jump. In that
2569 case, we can only take insns from the head of the thread for our delay
2570 slot. We then adjust the jump to point after the insns we have taken. */
2571
2572 static rtx
2573 fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
2574 rtx opposite_thread, int likely, int thread_if_true,
2575 int own_thread, int slots_to_fill,
2576 int *pslots_filled, rtx delay_list)
2577 {
2578 rtx new_thread;
2579 struct resources opposite_needed, set, needed;
2580 rtx trial;
2581 int lose = 0;
2582 int must_annul = 0;
2583 int flags;
2584
2585 /* Validate our arguments. */
2586 gcc_assert (condition != const_true_rtx || thread_if_true);
2587 gcc_assert (own_thread || thread_if_true);
2588
2589 flags = get_jump_flags (insn, JUMP_LABEL (insn));
2590
2591 /* If our thread is the end of the subroutine, we can't get any delay
2592 insns from that. */
2593 if (thread == 0)
2594 return delay_list;
2595
2596 /* If this is an unconditional branch, nothing is needed at the
2597 opposite thread. Otherwise, compute what is needed there. */
2598 if (condition == const_true_rtx)
2599 CLEAR_RESOURCE (&opposite_needed);
2600 else
2601 mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
2602
2603 /* If the insn at THREAD can be split, do it here to avoid having to
2604 update THREAD and NEW_THREAD if it is done in the loop below. Also
2605 initialize NEW_THREAD. */
2606
2607 new_thread = thread = try_split (PATTERN (thread), thread, 0);
2608
2609 /* Scan insns at THREAD. We are looking for an insn that can be removed
2610 from THREAD (it neither sets nor references resources that were set
2611 ahead of it and it doesn't set anything needed by the insns ahead of
2612 it) and that either can be placed in an annulling insn or isn't
2613 needed at OPPOSITE_THREAD. */
2614
2615 CLEAR_RESOURCE (&needed);
2616 CLEAR_RESOURCE (&set);
2617
2618 /* If we do not own this thread, we must stop as soon as we find
2619 something that we can't put in a delay slot, since all we can do
2620 is branch into THREAD at a later point. Therefore, labels stop
2621 the search if this is not the `true' thread. */
2622
2623 for (trial = thread;
2624 ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
2625 trial = next_nonnote_insn (trial))
2626 {
2627 rtx pat, old_trial;
2628
2629 /* If we have passed a label, we no longer own this thread. */
2630 if (LABEL_P (trial))
2631 {
2632 own_thread = 0;
2633 continue;
2634 }
2635
2636 pat = PATTERN (trial);
2637 if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2638 continue;
2639
2640 /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
2641 don't separate or copy insns that set and use CC0. */
2642 if (! insn_references_resource_p (trial, &set, true)
2643 && ! insn_sets_resource_p (trial, &set, true)
2644 && ! insn_sets_resource_p (trial, &needed, true)
2645 #ifdef HAVE_cc0
2646 && ! (reg_mentioned_p (cc0_rtx, pat)
2647 && (! own_thread || ! sets_cc0_p (pat)))
2648 #endif
2649 && ! can_throw_internal (trial))
2650 {
2651 rtx prior_insn;
2652
2653 /* If TRIAL is redundant with some insn before INSN, we don't
2654 actually need to add it to the delay list; we can merely pretend
2655 we did. */
2656 if ((prior_insn = redundant_insn (trial, insn, delay_list)))
2657 {
2658 fix_reg_dead_note (prior_insn, insn);
2659 if (own_thread)
2660 {
2661 update_block (trial, thread);
2662 if (trial == thread)
2663 {
2664 thread = next_active_insn (thread);
2665 if (new_thread == trial)
2666 new_thread = thread;
2667 }
2668
2669 delete_related_insns (trial);
2670 }
2671 else
2672 {
2673 update_reg_unused_notes (prior_insn, trial);
2674 new_thread = next_active_insn (trial);
2675 }
2676
2677 continue;
2678 }
2679
2680 /* There are two ways we can win: If TRIAL doesn't set anything
2681 needed at the opposite thread and can't trap, or if it can
2682 go into an annulled delay slot. */
2683 if (!must_annul
2684 && (condition == const_true_rtx
2685 || (! insn_sets_resource_p (trial, &opposite_needed, true)
2686 && ! may_trap_or_fault_p (pat))))
2687 {
2688 old_trial = trial;
2689 trial = try_split (pat, trial, 0);
2690 if (new_thread == old_trial)
2691 new_thread = trial;
2692 if (thread == old_trial)
2693 thread = trial;
2694 pat = PATTERN (trial);
2695 if (eligible_for_delay (insn, *pslots_filled, trial, flags))
2696 goto winner;
2697 }
2698 else if (0
2699 #ifdef ANNUL_IFTRUE_SLOTS
2700 || ! thread_if_true
2701 #endif
2702 #ifdef ANNUL_IFFALSE_SLOTS
2703 || thread_if_true
2704 #endif
2705 )
2706 {
2707 old_trial = trial;
2708 trial = try_split (pat, trial, 0);
2709 if (new_thread == old_trial)
2710 new_thread = trial;
2711 if (thread == old_trial)
2712 thread = trial;
2713 pat = PATTERN (trial);
2714 if ((must_annul || delay_list == NULL) && (thread_if_true
2715 ? check_annul_list_true_false (0, delay_list)
2716 && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
2717 : check_annul_list_true_false (1, delay_list)
2718 && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
2719 {
2720 rtx temp;
2721
2722 must_annul = 1;
2723 winner:
2724
2725 #ifdef HAVE_cc0
2726 if (reg_mentioned_p (cc0_rtx, pat))
2727 link_cc0_insns (trial);
2728 #endif
2729
2730 /* If we own this thread, delete the insn. If this is the
2731 destination of a branch, show that a basic block status
2732 may have been updated. In any case, mark the new
2733 starting point of this thread. */
2734 if (own_thread)
2735 {
2736 rtx note;
2737
2738 update_block (trial, thread);
2739 if (trial == thread)
2740 {
2741 thread = next_active_insn (thread);
2742 if (new_thread == trial)
2743 new_thread = thread;
2744 }
2745
2746 /* We are moving this insn, not deleting it. We must
2747 temporarily increment the use count on any referenced
2748 label lest it be deleted by delete_related_insns. */
2749 for (note = REG_NOTES (trial);
2750 note != NULL_RTX;
2751 note = XEXP (note, 1))
2752 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2753 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2754 {
2755 /* REG_LABEL_OPERAND could be
2756 NOTE_INSN_DELETED_LABEL too. */
2757 if (LABEL_P (XEXP (note, 0)))
2758 LABEL_NUSES (XEXP (note, 0))++;
2759 else
2760 gcc_assert (REG_NOTE_KIND (note)
2761 == REG_LABEL_OPERAND);
2762 }
2763 if (JUMP_P (trial) && JUMP_LABEL (trial))
2764 LABEL_NUSES (JUMP_LABEL (trial))++;
2765
2766 delete_related_insns (trial);
2767
2768 for (note = REG_NOTES (trial);
2769 note != NULL_RTX;
2770 note = XEXP (note, 1))
2771 if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2772 || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2773 {
2774 /* REG_LABEL_OPERAND could be
2775 NOTE_INSN_DELETED_LABEL too. */
2776 if (LABEL_P (XEXP (note, 0)))
2777 LABEL_NUSES (XEXP (note, 0))--;
2778 else
2779 gcc_assert (REG_NOTE_KIND (note)
2780 == REG_LABEL_OPERAND);
2781 }
2782 if (JUMP_P (trial) && JUMP_LABEL (trial))
2783 LABEL_NUSES (JUMP_LABEL (trial))--;
2784 }
2785 else
2786 new_thread = next_active_insn (trial);
2787
2788 temp = own_thread ? trial : copy_rtx (trial);
2789 if (thread_if_true)
2790 INSN_FROM_TARGET_P (temp) = 1;
2791
2792 delay_list = add_to_delay_list (temp, delay_list);
2793
2794 if (slots_to_fill == ++(*pslots_filled))
2795 {
2796 /* Even though we have filled all the slots, we
2797 may be branching to a location that has a
2798 redundant insn. Skip any if so. */
2799 while (new_thread && ! own_thread
2800 && ! insn_sets_resource_p (new_thread, &set, true)
2801 && ! insn_sets_resource_p (new_thread, &needed,
2802 true)
2803 && ! insn_references_resource_p (new_thread,
2804 &set, true)
2805 && (prior_insn
2806 = redundant_insn (new_thread, insn,
2807 delay_list)))
2808 {
2809 /* We know we do not own the thread, so no need
2810 to call update_block and delete_insn. */
2811 fix_reg_dead_note (prior_insn, insn);
2812 update_reg_unused_notes (prior_insn, new_thread);
2813 new_thread = next_active_insn (new_thread);
2814 }
2815 break;
2816 }
2817
2818 continue;
2819 }
2820 }
2821 }
2822
2823 /* This insn can't go into a delay slot. */
2824 lose = 1;
2825 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2826 mark_referenced_resources (trial, &needed, true);
2827
2828 /* Ensure we don't put insns between the setting of cc and the comparison
2829 by moving a setting of cc into an earlier delay slot since these insns
2830 could clobber the condition code. */
2831 set.cc = 1;
2832
2833 /* If this insn is a register-register copy and the next insn has
2834 a use of our destination, change it to use our source. That way,
2835 it will become a candidate for our delay slot the next time
2836 through this loop. This case occurs commonly in loops that
2837 scan a list.
2838
2839 We could check for more complex cases than those tested below,
2840 but it doesn't seem worth it. It might also be a good idea to try
2841 to swap the two insns. That might do better.
2842
2843 We can't do this if the next insn modifies our destination, because
2844 that would make the replacement into the insn invalid. We also can't
2845 do this if it modifies our source, because it might be an earlyclobber
2846 operand. This latter test also prevents updating the contents of
2847 a PRE_INC. We also can't do this if there's overlap of source and
2848 destination. Overlap may happen for larger-than-register-size modes. */
2849
2850 if (NONJUMP_INSN_P (trial) && GET_CODE (pat) == SET
2851 && REG_P (SET_SRC (pat))
2852 && REG_P (SET_DEST (pat))
2853 && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat)))
2854 {
2855 rtx next = next_nonnote_insn (trial);
2856
2857 if (next && NONJUMP_INSN_P (next)
2858 && GET_CODE (PATTERN (next)) != USE
2859 && ! reg_set_p (SET_DEST (pat), next)
2860 && ! reg_set_p (SET_SRC (pat), next)
2861 && reg_referenced_p (SET_DEST (pat), PATTERN (next))
2862 && ! modified_in_p (SET_DEST (pat), next))
2863 validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
2864 }
2865 }
2866
2867 /* If we stopped on a branch insn that has delay slots, see if we can
2868 steal some of the insns in those slots. */
2869 if (trial && NONJUMP_INSN_P (trial)
2870 && GET_CODE (PATTERN (trial)) == SEQUENCE
2871 && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
2872 {
2873 /* If this is the `true' thread, we will want to follow the jump,
2874 so we can only do this if we have taken everything up to here. */
2875 if (thread_if_true && trial == new_thread)
2876 {
2877 delay_list
2878 = steal_delay_list_from_target (insn, condition, PATTERN (trial),
2879 delay_list, &set, &needed,
2880 &opposite_needed, slots_to_fill,
2881 pslots_filled, &must_annul,
2882 &new_thread);
2883 /* If we owned the thread and are told that it branched
2884 elsewhere, make sure we own the thread at the new location. */
2885 if (own_thread && trial != new_thread)
2886 own_thread = own_thread_p (new_thread, new_thread, 0);
2887 }
2888 else if (! thread_if_true)
2889 delay_list
2890 = steal_delay_list_from_fallthrough (insn, condition,
2891 PATTERN (trial),
2892 delay_list, &set, &needed,
2893 &opposite_needed, slots_to_fill,
2894 pslots_filled, &must_annul);
2895 }
2896
2897 /* If we haven't found anything for this delay slot and it is very
2898 likely that the branch will be taken, see if the insn at our target
2899 increments or decrements a register with an increment that does not
2900 depend on the destination register. If so, try to place the opposite
2901 arithmetic insn after the jump insn and put the arithmetic insn in the
2902 delay slot. If we can't do this, return. */
2903 if (delay_list == 0 && likely && new_thread
2904 && NONJUMP_INSN_P (new_thread)
2905 && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
2906 && asm_noperands (PATTERN (new_thread)) < 0)
2907 {
2908 rtx pat = PATTERN (new_thread);
2909 rtx dest;
2910 rtx src;
2911
2912 trial = new_thread;
2913 pat = PATTERN (trial);
2914
2915 if (!NONJUMP_INSN_P (trial)
2916 || GET_CODE (pat) != SET
2917 || ! eligible_for_delay (insn, 0, trial, flags)
2918 || can_throw_internal (trial))
2919 return 0;
2920
2921 dest = SET_DEST (pat), src = SET_SRC (pat);
2922 if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
2923 && rtx_equal_p (XEXP (src, 0), dest)
2924 && (!FLOAT_MODE_P (GET_MODE (src))
2925 || flag_unsafe_math_optimizations)
2926 && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
2927 && ! side_effects_p (pat))
2928 {
2929 rtx other = XEXP (src, 1);
2930 rtx new_arith;
2931 rtx ninsn;
2932
2933 /* If this is a constant adjustment, use the same code with
2934 the negated constant. Otherwise, reverse the sense of the
2935 arithmetic. */
2936 if (CONST_INT_P (other))
2937 new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
2938 negate_rtx (GET_MODE (src), other));
2939 else
2940 new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
2941 GET_MODE (src), dest, other);
2942
2943 ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith),
2944 insn);
2945
2946 if (recog_memoized (ninsn) < 0
2947 || (extract_insn (ninsn), ! constrain_operands (1)))
2948 {
2949 delete_related_insns (ninsn);
2950 return 0;
2951 }
2952
2953 if (own_thread)
2954 {
2955 update_block (trial, thread);
2956 if (trial == thread)
2957 {
2958 thread = next_active_insn (thread);
2959 if (new_thread == trial)
2960 new_thread = thread;
2961 }
2962 delete_related_insns (trial);
2963 }
2964 else
2965 new_thread = next_active_insn (trial);
2966
2967 ninsn = own_thread ? trial : copy_rtx (trial);
2968 if (thread_if_true)
2969 INSN_FROM_TARGET_P (ninsn) = 1;
2970
2971 delay_list = add_to_delay_list (ninsn, NULL_RTX);
2972 (*pslots_filled)++;
2973 }
2974 }
2975
2976 if (delay_list && must_annul)
2977 INSN_ANNULLED_BRANCH_P (insn) = 1;
2978
2979 /* If we are to branch into the middle of this thread, find an appropriate
2980 label or make a new one if none, and redirect INSN to it. If we hit the
2981 end of the function, use the end-of-function label. */
2982 if (new_thread != thread)
2983 {
2984 rtx label;
2985
2986 gcc_assert (thread_if_true);
2987
2988 if (new_thread && JUMP_P (new_thread)
2989 && (simplejump_p (new_thread)
2990 || GET_CODE (PATTERN (new_thread)) == RETURN)
2991 && redirect_with_delay_list_safe_p (insn,
2992 JUMP_LABEL (new_thread),
2993 delay_list))
2994 new_thread = follow_jumps (JUMP_LABEL (new_thread));
2995
2996 if (new_thread == 0)
2997 label = find_end_label ();
2998 else if (LABEL_P (new_thread))
2999 label = new_thread;
3000 else
3001 label = get_label_before (new_thread);
3002
3003 if (label)
3004 reorg_redirect_jump (insn, label);
3005 }
3006
3007 return delay_list;
3008 }
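
/* Standalone sketch of the arithmetic-reversal step above: given the SET
   at the branch target, build the compensating operation that is emitted
   after the jump.  A constant step keeps the operation and negates the
   constant; a register step flips PLUS to MINUS (or back).  The helper
   name is hypothetical.  */

static rtx
reversed_step_sketch (rtx dest, rtx src)
{
  rtx other = XEXP (src, 1);

  if (CONST_INT_P (other))
    return gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
			   negate_rtx (GET_MODE (src), other));

  return gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
			 GET_MODE (src), dest, other);
}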
3009 \f
3010 /* Make another attempt to find insns to place in delay slots.
3011
3012 We previously looked for insns located in front of the delay insn
3013 and, for non-jump delay insns, located behind the delay insn.
3014
3015 Here we only try to schedule jump insns and to move insns from either
3016 the target or the following insns into the delay slot. If annulling is
3017 supported, we are likely to do this. Otherwise, we can do this only
3018 if safe. */
3019
3020 static void
3021 fill_eager_delay_slots (void)
3022 {
3023 rtx insn;
3024 int i;
3025 int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
3026
3027 for (i = 0; i < num_unfilled_slots; i++)
3028 {
3029 rtx condition;
3030 rtx target_label, insn_at_target, fallthrough_insn;
3031 rtx delay_list = 0;
3032 int own_target;
3033 int own_fallthrough;
3034 int prediction, slots_to_fill, slots_filled;
3035
3036 insn = unfilled_slots_base[i];
3037 if (insn == 0
3038 || INSN_DELETED_P (insn)
3039 || !JUMP_P (insn)
3040 || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
3041 continue;
3042
3043 slots_to_fill = num_delay_slots (insn);
3044 /* Some machine descriptions have defined instructions to have
3045 delay slots only in certain circumstances which may depend on
3046 nearby insns (which change due to reorg's actions).
3047
3048 For example, the PA port normally has delay slots for unconditional
3049 jumps.
3050
3051 However, the PA port claims such jumps do not have a delay slot
3052 if they are immediate successors of certain CALL_INSNs. This
3053 allows the port to favor filling the delay slot of the call with
3054 the unconditional jump. */
3055 if (slots_to_fill == 0)
3056 continue;
3057
3058 slots_filled = 0;
3059 target_label = JUMP_LABEL (insn);
3060 condition = get_branch_condition (insn, target_label);
3061
3062 if (condition == 0)
3063 continue;
3064
3065 /* Get the next active fallthrough and target insns and see if we own
3066 them. Then see whether the branch is likely true. We don't need
3067 to do a lot of this for unconditional branches. */
3068
3069 insn_at_target = next_active_insn (target_label);
3070 own_target = own_thread_p (target_label, target_label, 0);
3071
3072 if (condition == const_true_rtx)
3073 {
3074 own_fallthrough = 0;
3075 fallthrough_insn = 0;
3076 prediction = 2;
3077 }
3078 else
3079 {
3080 fallthrough_insn = next_active_insn (insn);
3081 own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
3082 prediction = mostly_true_jump (insn, condition);
3083 }
3084
3085 /* If this insn is expected to branch, first try to get insns from our
3086 target, then our fallthrough insns. If it is not expected to branch,
3087 try the other order. */
3088
3089 if (prediction > 0)
3090 {
3091 delay_list
3092 = fill_slots_from_thread (insn, condition, insn_at_target,
3093 fallthrough_insn, prediction == 2, 1,
3094 own_target,
3095 slots_to_fill, &slots_filled, delay_list);
3096
3097 if (delay_list == 0 && own_fallthrough)
3098 {
3099 /* Even though we didn't find anything for delay slots,
3100 we might have found a redundant insn which we deleted
3101 from the thread that was filled. So we have to recompute
3102 the next insn at the target. */
3103 target_label = JUMP_LABEL (insn);
3104 insn_at_target = next_active_insn (target_label);
3105
3106 delay_list
3107 = fill_slots_from_thread (insn, condition, fallthrough_insn,
3108 insn_at_target, 0, 0,
3109 own_fallthrough,
3110 slots_to_fill, &slots_filled,
3111 delay_list);
3112 }
3113 }
3114 else
3115 {
3116 if (own_fallthrough)
3117 delay_list
3118 = fill_slots_from_thread (insn, condition, fallthrough_insn,
3119 insn_at_target, 0, 0,
3120 own_fallthrough,
3121 slots_to_fill, &slots_filled,
3122 delay_list);
3123
3124 if (delay_list == 0)
3125 delay_list
3126 = fill_slots_from_thread (insn, condition, insn_at_target,
3127 next_active_insn (insn), 0, 1,
3128 own_target,
3129 slots_to_fill, &slots_filled,
3130 delay_list);
3131 }
3132
3133 if (delay_list)
3134 unfilled_slots_base[i]
3135 = emit_delay_sequence (insn, delay_list, slots_filled);
3136
3137 if (slots_to_fill == slots_filled)
3138 unfilled_slots_base[i] = 0;
3139
3140 note_delay_statistics (slots_filled, 1);
3141 }
3142 }
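
/* Sketch of the thread-ordering policy above.  PREDICTION comes from
   mostly_true_jump: 2 for a branch that is unconditional or judged
   extremely likely (e.g. a loop-back branch), positive when the branch
   is expected taken.  In those cases the target thread gets first claim
   on the slots; otherwise the fallthrough thread does.  Illustrative
   only.  */

static int
try_target_thread_first_sketch (int prediction)
{
  return prediction > 0;
}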
3143 \f
3144 static void delete_computation (rtx insn);
3145
3146 /* Recursively delete prior insns that compute the value (used only by INSN,
3147 which the caller is deleting) stored in the register mentioned by NOTE,
3148 which is a REG_DEAD note associated with INSN. */
3149
3150 static void
3151 delete_prior_computation (rtx note, rtx insn)
3152 {
3153 rtx our_prev;
3154 rtx reg = XEXP (note, 0);
3155
3156 for (our_prev = prev_nonnote_insn (insn);
3157 our_prev && (NONJUMP_INSN_P (our_prev)
3158 || CALL_P (our_prev));
3159 our_prev = prev_nonnote_insn (our_prev))
3160 {
3161 rtx pat = PATTERN (our_prev);
3162
3163 /* If we reach a CALL which is not calling a const function
3164 or the callee pops the arguments, then give up. */
3165 if (CALL_P (our_prev)
3166 && (! RTL_CONST_CALL_P (our_prev)
3167 || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
3168 break;
3169
3170 /* If we reach a SEQUENCE, it is too complex to try to
3171 do anything with it, so give up. We can be run during
3172 and after reorg, so SEQUENCE rtl can legitimately show
3173 up here. */
3174 if (GET_CODE (pat) == SEQUENCE)
3175 break;
3176
3177 if (GET_CODE (pat) == USE
3178 && NONJUMP_INSN_P (XEXP (pat, 0)))
3179 /* reorg creates USEs that look like this. We leave them
3180 alone because reorg needs them for its own purposes. */
3181 break;
3182
3183 if (reg_set_p (reg, pat))
3184 {
3185 if (side_effects_p (pat) && !CALL_P (our_prev))
3186 break;
3187
3188 if (GET_CODE (pat) == PARALLEL)
3189 {
3190 /* If we find a SET of something else, we can't
3191 delete the insn. */
3192
3193 int i;
3194
3195 for (i = 0; i < XVECLEN (pat, 0); i++)
3196 {
3197 rtx part = XVECEXP (pat, 0, i);
3198
3199 if (GET_CODE (part) == SET
3200 && SET_DEST (part) != reg)
3201 break;
3202 }
3203
3204 if (i == XVECLEN (pat, 0))
3205 delete_computation (our_prev);
3206 }
3207 else if (GET_CODE (pat) == SET
3208 && REG_P (SET_DEST (pat)))
3209 {
3210 int dest_regno = REGNO (SET_DEST (pat));
3211 int dest_endregno = END_REGNO (SET_DEST (pat));
3212 int regno = REGNO (reg);
3213 int endregno = END_REGNO (reg);
3214
3215 if (dest_regno >= regno
3216 && dest_endregno <= endregno)
3217 delete_computation (our_prev);
3218
3219 /* We may have a multi-word hard register and some, but not
3220 all, of the words of the register are needed in subsequent
3221 insns. Write REG_UNUSED notes for those parts that were not
3222 needed. */
3223 else if (dest_regno <= regno
3224 && dest_endregno >= endregno)
3225 {
3226 int i;
3227
3228 add_reg_note (our_prev, REG_UNUSED, reg);
3229
3230 for (i = dest_regno; i < dest_endregno; i++)
3231 if (! find_regno_note (our_prev, REG_UNUSED, i))
3232 break;
3233
3234 if (i == dest_endregno)
3235 delete_computation (our_prev);
3236 }
3237 }
3238
3239 break;
3240 }
3241
3242 /* If PAT references the register that dies here, it is an
3243 additional use. Hence any prior SET isn't dead. However, this
3244 insn becomes the new place for the REG_DEAD note. */
3245 if (reg_overlap_mentioned_p (reg, pat))
3246 {
3247 XEXP (note, 1) = REG_NOTES (our_prev);
3248 REG_NOTES (our_prev) = note;
3249 break;
3250 }
3251 }
3252 }
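
/* Worked instance of the partial-register case above, with assumed hard
   register numbers.  Suppose the prior insn sets regs 10..11 (dest_regno
   10, dest_endregno 12) and the REG_DEAD note covers only reg 10 (regno
   10, endregno 11).  The SET covers the dead register, so reg 10 gets a
   REG_UNUSED note on the setter; the setter itself may be deleted only
   once every reg in 10..11 is marked unused.  The range test, in
   isolation:  */

static int
set_covers_dead_reg_sketch (int dest_regno, int dest_endregno,
			    int regno, int endregno)
{
  return dest_regno <= regno && dest_endregno >= endregno;
}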
3253
3254 /* Delete INSN and recursively delete insns that compute values used only
3255 by INSN. This uses the REG_DEAD notes computed during flow analysis.
3256
3257 Look at all our REG_DEAD notes. If a previous insn does nothing other
3258 than set a register that dies in this insn, we can delete that insn
3259 as well.
3260
3261 On machines with CC0, if CC0 is used in this insn, we may be able to
3262 delete the insn that set it. */
3263
3264 static void
3265 delete_computation (rtx insn)
3266 {
3267 rtx note, next;
3268
3269 #ifdef HAVE_cc0
3270 if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
3271 {
3272 rtx prev = prev_nonnote_insn (insn);
3273 /* We assume that at this stage
3274 CC's are always set explicitly
3275 and always immediately before the jump that
3276 will use them. So if the previous insn
3277 exists to set the CC's, delete it
3278 (unless it performs auto-increments, etc.). */
3279 if (prev && NONJUMP_INSN_P (prev)
3280 && sets_cc0_p (PATTERN (prev)))
3281 {
3282 if (sets_cc0_p (PATTERN (prev)) > 0
3283 && ! side_effects_p (PATTERN (prev)))
3284 delete_computation (prev);
3285 else
3286 /* Otherwise, show that cc0 won't be used. */
3287 add_reg_note (prev, REG_UNUSED, cc0_rtx);
3288 }
3289 }
3290 #endif
3291
3292 for (note = REG_NOTES (insn); note; note = next)
3293 {
3294 next = XEXP (note, 1);
3295
3296 if (REG_NOTE_KIND (note) != REG_DEAD
3297 /* Verify that the REG_NOTE is legitimate. */
3298 || !REG_P (XEXP (note, 0)))
3299 continue;
3300
3301 delete_prior_computation (note, insn);
3302 }
3303
3304 delete_related_insns (insn);
3305 }
3306
3307 /* If all INSN does is set the pc, delete it,
3308 and delete the insn that set the condition codes for it
3309 if that's what the previous thing was. */
3310
3311 static void
3312 delete_jump (rtx insn)
3313 {
3314 rtx set = single_set (insn);
3315
3316 if (set && GET_CODE (SET_DEST (set)) == PC)
3317 delete_computation (insn);
3318 }
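
/* The test delete_jump applies, as a standalone sketch: an insn is
   deletable here only when its single_set (if any) assigns to the
   program counter; insns that do more than set the pc are left alone.  */

static int
sets_pc_only_sketch (rtx insn)
{
  rtx set = single_set (insn);

  return set != 0 && GET_CODE (SET_DEST (set)) == PC;
}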
3319
3320 \f
3321 /* Once we have tried two ways to fill a delay slot, make a pass over the
3322 code to try to improve the results and to do such things as more jump
3323 threading. */
3324
3325 static void
3326 relax_delay_slots (rtx first)
3327 {
3328 rtx insn, next, pat;
3329 rtx trial, delay_insn, target_label;
3330
3331 /* Look at every JUMP_INSN and see if we can improve it. */
3332 for (insn = first; insn; insn = next)
3333 {
3334 rtx other;
3335
3336 next = next_active_insn (insn);
3337
3338 /* If this is a jump insn, see if it now jumps to a jump, jumps to
3339 the next insn, or jumps to a label that is not the last of a
3340 group of consecutive labels. */
3341 if (JUMP_P (insn)
3342 && (condjump_p (insn) || condjump_in_parallel_p (insn))
3343 && (target_label = JUMP_LABEL (insn)) != 0)
3344 {
3345 target_label = skip_consecutive_labels (follow_jumps (target_label));
3346 if (target_label == 0)
3347 target_label = find_end_label ();
3348
3349 if (target_label && next_active_insn (target_label) == next
3350 && ! condjump_in_parallel_p (insn))
3351 {
3352 delete_jump (insn);
3353 continue;
3354 }
3355
3356 if (target_label && target_label != JUMP_LABEL (insn))
3357 reorg_redirect_jump (insn, target_label);
3358
3359 /* See if this jump conditionally branches around an unconditional
3360 jump. If so, invert this jump and point it to the target of the
3361 second jump. */
3362 if (next && JUMP_P (next)
3363 && any_condjump_p (insn)
3364 && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
3365 && target_label
3366 && next_active_insn (target_label) == next_active_insn (next)
3367 && no_labels_between_p (insn, next))
3368 {
3369 rtx label = JUMP_LABEL (next);
3370
3371 /* Be careful how we do this to avoid deleting code or
3372 labels that are momentarily dead. See similar optimization
3373 in jump.c.
3374
3375 We also need to ensure we properly handle the case when
3376 invert_jump fails. */
3377
3378 ++LABEL_NUSES (target_label);
3379 if (label)
3380 ++LABEL_NUSES (label);
3381
3382 if (invert_jump (insn, label, 1))
3383 {
3384 delete_related_insns (next);
3385 next = insn;
3386 }
3387
3388 if (label)
3389 --LABEL_NUSES (label);
3390
3391 if (--LABEL_NUSES (target_label) == 0)
3392 delete_related_insns (target_label);
3393
3394 continue;
3395 }
3396 }

      /* If this is an unconditional jump and the previous insn is a
	 conditional jump, try reversing the condition of the previous
	 insn and swapping our targets.  The next pass might be able to
	 fill the slots.

	 Don't do this if we expect the conditional branch to be true, because
	 we would then be making the more common case longer.  */
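      /* For instance (hypothetical pseudo-assembly), when the conditional
	 branch is mostly untaken:

	     bne  L1		; rarely taken
	     jmp  L2		; common path executes two branches

	 becomes

	     beq  L2		; common path now takes one branch
	     jmp  L1		; rarely executed
	 */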

      if (JUMP_P (insn)
	  && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
	  && (other = prev_active_insn (insn)) != 0
	  && any_condjump_p (other)
	  && no_labels_between_p (other, insn)
	  && mostly_true_jump (other,
			       get_branch_condition (other,
						     JUMP_LABEL (other))) < 0)
	{
	  rtx other_target = JUMP_LABEL (other);
	  target_label = JUMP_LABEL (insn);

	  if (invert_jump (other, target_label, 0))
	    reorg_redirect_jump (insn, other_target);
	}

      /* Now look only at cases where we have filled a delay slot.  */
      if (!NONJUMP_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) != SEQUENCE)
	continue;

      pat = PATTERN (insn);
      delay_insn = XVECEXP (pat, 0, 0);

      /* See if the first insn in the delay slot is redundant with some
	 previous insn.  Remove it from the delay slot if so; then set up
	 to reprocess this insn.  */
      if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
	{
	  delete_from_delay_slot (XVECEXP (pat, 0, 1));
	  next = prev_active_insn (next);
	  continue;
	}

      /* See if we have a RETURN insn with a filled delay slot followed
	 by a RETURN insn with an unfilled delay slot.  If so, we can delete
	 the first RETURN (but not its delay insn).  This gives the same
	 effect in fewer instructions.

	 Only do so if optimizing for size, since this results in slower,
	 but smaller, code.  */
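      /* Roughly (illustrative):

	     (sequence [(return) (D)])	; filled RETURN, delay insn D
	     (return)			; unfilled RETURN

	 becomes just D followed by the unfilled RETURN: the same work in
	 fewer insns, though the surviving RETURN's slot stays empty.  */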
      if (optimize_function_for_size_p (cfun)
	  && GET_CODE (PATTERN (delay_insn)) == RETURN
	  && next
	  && JUMP_P (next)
	  && GET_CODE (PATTERN (next)) == RETURN)
	{
	  rtx after;
	  int i;

	  /* Delete the RETURN and just execute the delay list insns.

	     We do this by deleting the INSN containing the SEQUENCE, then
	     re-emitting the insns separately, and then deleting the RETURN.
	     This allows the count of the jump target to be properly
	     decremented.

	     Note that we need to change the INSN_UID of the re-emitted insns
	     since it is used to hash the insns for mark_target_live_regs and
	     the re-emitted insns will no longer be wrapped up in a SEQUENCE.

	     Clear the from target bit, since these insns are no longer
	     in delay slots.  */
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;

	  trial = PREV_INSN (insn);
	  delete_related_insns (insn);
	  gcc_assert (GET_CODE (pat) == SEQUENCE);
	  add_insn_after (delay_insn, trial, NULL);
	  after = delay_insn;
	  for (i = 1; i < XVECLEN (pat, 0); i++)
	    after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
	  delete_scheduled_jump (delay_insn);
	  continue;
	}

      /* Now look only at the cases where we have a filled JUMP_INSN.  */
      if (!JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
	  || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
		|| condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
	continue;

      target_label = JUMP_LABEL (delay_insn);

      if (target_label)
	{
	  /* If this jump goes to another unconditional jump, thread it, but
	     don't convert a jump into a RETURN here.  */
	  trial = skip_consecutive_labels (follow_jumps (target_label));
	  if (trial == 0)
	    trial = find_end_label ();

	  if (trial && trial != target_label
	      && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
	    {
	      reorg_redirect_jump (delay_insn, trial);
	      target_label = trial;
	    }

	  /* If the first insn at TARGET_LABEL is redundant with a previous
	     insn, redirect the jump to the following insn and process again.
	     We use next_real_insn instead of next_active_insn so we
	     don't skip USE-markers, or we'll end up with incorrect
	     liveness info.  */
	  trial = next_real_insn (target_label);
	  if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
	      && redundant_insn (trial, insn, 0)
	      && ! can_throw_internal (trial))
	    {
	      /* Figure out where to emit the special USE insn so we don't
		 later incorrectly compute register live/death info.  */
	      rtx tmp = next_active_insn (trial);
	      if (tmp == 0)
		tmp = find_end_label ();

	      if (tmp)
		{
		  /* Insert the special USE insn and update dataflow info.  */
		  update_block (trial, tmp);

		  /* Now emit a label before the special USE insn, and
		     redirect our jump to the new label.  */
		  target_label = get_label_before (PREV_INSN (tmp));
		  reorg_redirect_jump (delay_insn, target_label);
		  next = insn;
		  continue;
		}
	    }

	  /* Similarly, if it is an unconditional jump with one insn in its
	     delay list and that insn is redundant, thread the jump.  */
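	  /* E.g. if the insn at TARGET_LABEL is a filled "jmp L2" whose
	     single delay insn has already been performed on our path, we
	     can send our jump straight to L2 (a hypothetical label).  */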
	  if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
	      && XVECLEN (PATTERN (trial), 0) == 2
	      && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
	      && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
		  || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
	      && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
	    {
	      target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
	      if (target_label == 0)
		target_label = find_end_label ();

	      if (target_label
		  && redirect_with_delay_slots_safe_p (delay_insn, target_label,
						       insn))
		{
		  reorg_redirect_jump (delay_insn, target_label);
		  next = insn;
		  continue;
		}
	    }
	}

      if (! INSN_ANNULLED_BRANCH_P (delay_insn)
	  && prev_active_insn (target_label) == insn
	  && ! condjump_in_parallel_p (delay_insn)
#ifdef HAVE_cc0
	  /* If the last insn in the delay slot sets CC0 for some insn,
	     various code assumes that it is in a delay slot.  We could
	     put it back where it belonged and delete the register notes,
	     but it doesn't seem worthwhile in this uncommon case.  */
	  && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
			      REG_CC_USER, NULL_RTX)
#endif
	  )
	{
	  rtx after;
	  int i;

	  /* All this insn does is execute its delay list and jump to the
	     following insn.  So delete the jump and just execute the delay
	     list insns.

	     We do this by deleting the INSN containing the SEQUENCE, then
	     re-emitting the insns separately, and then deleting the jump.
	     This allows the count of the jump target to be properly
	     decremented.

	     Note that we need to change the INSN_UID of the re-emitted insns
	     since it is used to hash the insns for mark_target_live_regs and
	     the re-emitted insns will no longer be wrapped up in a SEQUENCE.

	     Clear the from target bit, since these insns are no longer
	     in delay slots.  */
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;

	  trial = PREV_INSN (insn);
	  delete_related_insns (insn);
	  gcc_assert (GET_CODE (pat) == SEQUENCE);
	  add_insn_after (delay_insn, trial, NULL);
	  after = delay_insn;
	  for (i = 1; i < XVECLEN (pat, 0); i++)
	    after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
	  delete_scheduled_jump (delay_insn);
	  continue;
	}

      /* See if this is an unconditional jump around a single insn which is
	 identical to the one in its delay slot.  In this case, we can just
	 delete the branch and the insn in its delay slot.  */
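      /* E.g. (illustrative pseudo-assembly):

	     jmp  L1		; delay slot holds insn X
	     X			; identical copy of X
	  L1:

	 Deleting the jump and its slot lets control fall through the copy
	 of X into L1 with the same net effect.  */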
      if (next && NONJUMP_INSN_P (next)
	  && prev_label (next_active_insn (next)) == target_label
	  && simplejump_p (insn)
	  && XVECLEN (pat, 0) == 2
	  && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
	{
	  delete_related_insns (insn);
	  continue;
	}

      /* See if this jump (with its delay slots) conditionally branches
	 around an unconditional jump (without delay slots).  If so, invert
	 this jump and point it to the target of the second jump.  We cannot
	 do this for annulled jumps, though.  Again, don't convert a jump to
	 a RETURN here.  */
      if (! INSN_ANNULLED_BRANCH_P (delay_insn)
	  && any_condjump_p (delay_insn)
	  && next && JUMP_P (next)
	  && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
	  && next_active_insn (target_label) == next_active_insn (next)
	  && no_labels_between_p (insn, next))
	{
	  rtx label = JUMP_LABEL (next);
	  rtx old_label = JUMP_LABEL (delay_insn);

	  if (label == 0)
	    label = find_end_label ();

	  /* find_end_label can generate a new label.  Check this first.  */
	  if (label
	      && no_labels_between_p (insn, next)
	      && redirect_with_delay_slots_safe_p (delay_insn, label, insn))
	    {
	      /* Be careful how we do this to avoid deleting code or labels
		 that are momentarily dead.  See similar optimization in
		 jump.c.  */
	      if (old_label)
		++LABEL_NUSES (old_label);

	      if (invert_jump (delay_insn, label, 1))
		{
		  int i;

		  /* Must update the INSN_FROM_TARGET_P bits now that
		     the branch is reversed, so that mark_target_live_regs
		     will handle the delay slot insn correctly.  */
		  for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
		    {
		      rtx slot = XVECEXP (PATTERN (insn), 0, i);
		      INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
		    }

		  delete_related_insns (next);
		  next = insn;
		}

	      if (old_label && --LABEL_NUSES (old_label) == 0)
		delete_related_insns (old_label);
	      continue;
	    }
	}

      /* If we own the thread opposite the way this insn branches, see if we
	 can merge its delay slots with following insns.  */
      if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
	  && own_thread_p (NEXT_INSN (insn), 0, 1))
	try_merge_delay_insns (insn, next);
      else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
	       && own_thread_p (target_label, target_label, 0))
	try_merge_delay_insns (insn, next_active_insn (target_label));

      /* If we get here, we haven't deleted INSN.  But we may have deleted
	 NEXT, so recompute it.  */
      next = next_active_insn (insn);
    }
}
\f
#ifdef HAVE_return

/* Look for filled jumps to the end of function label.  We can try to convert
   them into RETURN insns if the insns in the delay slot are valid for the
   RETURN as well.  */
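/* A sketch of the intended transformation (illustrative pseudo-assembly):

	jmp  end_of_function_label	; delay slot holds insn D
	...
     end_of_function_label:
	return

   becomes a "return" insn carrying D in its own delay slot, provided D
   is still valid there (see the eligible_for_* checks below).  */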

static void
make_return_insns (rtx first)
{
  rtx insn, jump_insn, pat;
  rtx real_return_label = end_of_function_label;
  int slots, i;

#ifdef DELAY_SLOTS_FOR_EPILOGUE
  /* If a previous pass filled delay slots in the epilogue, things get a
     bit more complicated, as those filler insns would generally (without
     data flow analysis) have to be executed after any existing branch
     delay slot filler insns.  It is also unknown whether such a
     transformation would actually be profitable.  Note that the existing
     code only cares for branches with (some) filled delay slots.  */
  if (crtl->epilogue_delay_list != NULL)
    return;
#endif

  /* See if there is a RETURN insn in the function other than the one we
     made for END_OF_FUNCTION_LABEL.  If so, set up anything we can't change
     into a RETURN to jump to it.  */
  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (JUMP_P (insn) && GET_CODE (PATTERN (insn)) == RETURN)
      {
	real_return_label = get_label_before (insn);
	break;
      }

  /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
     was equal to END_OF_FUNCTION_LABEL.  */
  LABEL_NUSES (real_return_label)++;

  /* Clear the list of insns to fill so we can use it.  */
  obstack_free (&unfilled_slots_obstack, unfilled_firstobj);

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      int flags;

      /* Only look at filled JUMP_INSNs that go to the end of function
	 label.  */
      if (!NONJUMP_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) != SEQUENCE
	  || !JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
	  || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
	continue;

      pat = PATTERN (insn);
      jump_insn = XVECEXP (pat, 0, 0);

      /* If we can't make the jump into a RETURN, try to redirect it to the
	 best RETURN and go on to the next insn.  */
      if (! reorg_redirect_jump (jump_insn, NULL_RTX))
	{
	  /* Make sure redirecting the jump will not invalidate the delay
	     slot insns.  */
	  if (redirect_with_delay_slots_safe_p (jump_insn,
						real_return_label,
						insn))
	    reorg_redirect_jump (jump_insn, real_return_label);
	  continue;
	}
      /* See if this RETURN can accept the insns currently in its delay slot.
	 It can if it has more or an equal number of slots and the contents
	 of each is valid.  */

      flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
      slots = num_delay_slots (jump_insn);
      if (slots >= XVECLEN (pat, 0) - 1)
	{
	  for (i = 1; i < XVECLEN (pat, 0); i++)
	    if (! (
#ifdef ANNUL_IFFALSE_SLOTS
		   (INSN_ANNULLED_BRANCH_P (jump_insn)
		    && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
		   ? eligible_for_annul_false (jump_insn, i - 1,
					       XVECEXP (pat, 0, i), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
		   (INSN_ANNULLED_BRANCH_P (jump_insn)
		    && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
		   ? eligible_for_annul_true (jump_insn, i - 1,
					      XVECEXP (pat, 0, i), flags) :
#endif
		   eligible_for_delay (jump_insn, i - 1,
				       XVECEXP (pat, 0, i), flags)))
	      break;
	}
      else
	i = 0;

      if (i == XVECLEN (pat, 0))
	continue;

      /* We have to do something with this insn.  If it is an unconditional
	 RETURN, delete the SEQUENCE and output the individual insns,
	 followed by the RETURN.  Then set things up so we try to find
	 insns for its delay slots, if it needs some.  */
      if (GET_CODE (PATTERN (jump_insn)) == RETURN)
	{
	  rtx prev = PREV_INSN (insn);

	  delete_related_insns (insn);
	  for (i = 1; i < XVECLEN (pat, 0); i++)
	    prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);

	  insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
	  emit_barrier_after (insn);

	  if (slots)
	    obstack_ptr_grow (&unfilled_slots_obstack, insn);
	}
      else
	/* It is probably more efficient to keep this with its current
	   delay slot as a branch to a RETURN.  */
	reorg_redirect_jump (jump_insn, real_return_label);
    }

  /* Now delete REAL_RETURN_LABEL if we never used it.  Then try to fill any
     new delay slots we have created.  */
  if (--LABEL_NUSES (real_return_label) == 0)
    delete_related_insns (real_return_label);

  fill_simple_delay_slots (1);
  fill_simple_delay_slots (0);
}
#endif
\f
/* Try to find insns to place in delay slots.  */
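/* Roughly: build the uid_to_ruid ordering map, collect every insn with
   delay slots on unfilled_slots_obstack, alternate fill_simple_delay_slots,
   fill_eager_delay_slots and relax_delay_slots for MAX_REORG_PASSES
   iterations, optionally convert jumps to the end-of-function label into
   RETURNs, then emit statistics and REG_BR_PRED notes.  */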

void
dbr_schedule (rtx first)
{
  rtx insn, next, epilogue_insn = 0;
  int i;

  /* If the current function has no insns other than the prologue and
     epilogue, then do not try to fill any delay slots.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* Find the highest INSN_UID and allocate and initialize our map from
     INSN_UID's to position in code.  */
  for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) > max_uid)
	max_uid = INSN_UID (insn);
      if (NOTE_P (insn)
	  && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
	epilogue_insn = insn;
    }

  uid_to_ruid = XNEWVEC (int, max_uid + 1);
  for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
    uid_to_ruid[INSN_UID (insn)] = i;

  /* Initialize the list of insns that need filling.  */
  if (unfilled_firstobj == 0)
    {
      gcc_obstack_init (&unfilled_slots_obstack);
      unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
    }

  for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
    {
      rtx target;

      INSN_ANNULLED_BRANCH_P (insn) = 0;
      INSN_FROM_TARGET_P (insn) = 0;

      /* Skip vector tables.  We can't get attributes for them.  */
      if (JUMP_TABLE_DATA_P (insn))
	continue;

      if (num_delay_slots (insn) > 0)
	obstack_ptr_grow (&unfilled_slots_obstack, insn);

      /* Ensure all jumps go to the last of a set of consecutive labels.  */
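      /* I.e. (illustrative) a jump to L1 in

	     L1:
	     L2:  insn

	 is redirected to L2, so that later pointer comparisons of
	 JUMP_LABELs see a single canonical label.  */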
      if (JUMP_P (insn)
	  && (condjump_p (insn) || condjump_in_parallel_p (insn))
	  && JUMP_LABEL (insn) != 0
	  && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
	      != JUMP_LABEL (insn)))
	redirect_jump (insn, target, 1);
    }

  init_resource_info (epilogue_insn);

  /* Show we haven't computed an end-of-function label yet.  */
  end_of_function_label = 0;

  /* Initialize the statistics for this function.  */
  memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
  memset (num_filled_delays, 0, sizeof num_filled_delays);

  /* Now do the delay slot filling.  Try everything twice in case earlier
     changes make more slots fillable.  */

  for (reorg_pass_number = 0;
       reorg_pass_number < MAX_REORG_PASSES;
       reorg_pass_number++)
    {
      fill_simple_delay_slots (1);
      fill_simple_delay_slots (0);
      fill_eager_delay_slots ();
      relax_delay_slots (first);
    }

  /* If we made an end of function label, indicate that it is now
     safe to delete it by undoing our prior adjustment to LABEL_NUSES.
     If it is now unused, delete it.  */
  if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
    delete_related_insns (end_of_function_label);

#ifdef HAVE_return
  if (HAVE_return && end_of_function_label != 0)
    make_return_insns (first);
#endif

  /* Delete any USE insns made by update_block; subsequent passes don't need
     them or know how to deal with them.  */
  for (insn = first; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
	  && INSN_P (XEXP (PATTERN (insn), 0)))
	next = delete_related_insns (insn);
    }

  obstack_free (&unfilled_slots_obstack, unfilled_firstobj);

  /* It is not clear why the line below is needed, but it does seem to be.  */
  unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);

  if (dump_file)
    {
      int i, j, need_comma;
      int total_delay_slots[MAX_DELAY_HISTOGRAM + 1];
      int total_annul_slots[MAX_DELAY_HISTOGRAM + 1];
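
      /* The dump produced below has roughly this shape (values are
	 illustrative, not from a real run):

	     ;; Reorg pass #1:
	     ;; Reorg function #0
	     ;; 12 insns needing delay slots
	     ;; 8 got 1 delays, 2 got 2 delays
	     ...
	     ;; Reorg totals: 10 got 1 delays  */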

      for (reorg_pass_number = 0;
	   reorg_pass_number < MAX_REORG_PASSES;
	   reorg_pass_number++)
	{
	  fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
	  for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
	    {
	      need_comma = 0;
	      fprintf (dump_file, ";; Reorg function #%d\n", i);

	      fprintf (dump_file, ";; %d insns needing delay slots\n;; ",
		       num_insns_needing_delays[i][reorg_pass_number]);

	      for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
		if (num_filled_delays[i][j][reorg_pass_number])
		  {
		    if (need_comma)
		      fprintf (dump_file, ", ");
		    need_comma = 1;
		    fprintf (dump_file, "%d got %d delays",
			     num_filled_delays[i][j][reorg_pass_number], j);
		  }
	      fprintf (dump_file, "\n");
	    }
	}
      memset (total_delay_slots, 0, sizeof total_delay_slots);
      memset (total_annul_slots, 0, sizeof total_annul_slots);
      for (insn = first; insn; insn = NEXT_INSN (insn))
	{
	  if (! INSN_DELETED_P (insn)
	      && NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    {
	      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
		{
		  j = XVECLEN (PATTERN (insn), 0) - 1;
		  if (j > MAX_DELAY_HISTOGRAM)
		    j = MAX_DELAY_HISTOGRAM;
		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (insn), 0, 0)))
		    total_annul_slots[j]++;
		  else
		    total_delay_slots[j]++;
		}
	      else if (num_delay_slots (insn) > 0)
		total_delay_slots[0]++;
	    }
	}
      fprintf (dump_file, ";; Reorg totals: ");
      need_comma = 0;
      for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
	{
	  if (total_delay_slots[j])
	    {
	      if (need_comma)
		fprintf (dump_file, ", ");
	      need_comma = 1;
	      fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j);
	    }
	}
      fprintf (dump_file, "\n");
#if defined (ANNUL_IFTRUE_SLOTS) || defined (ANNUL_IFFALSE_SLOTS)
      fprintf (dump_file, ";; Reorg annuls: ");
      need_comma = 0;
      for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
	{
	  if (total_annul_slots[j])
	    {
	      if (need_comma)
		fprintf (dump_file, ", ");
	      need_comma = 1;
	      fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j);
	    }
	}
      fprintf (dump_file, "\n");
#endif
      fprintf (dump_file, "\n");
    }

  /* For all JUMP insns, fill in branch prediction notes, so that during
     assembler output a target can set branch prediction bits in the code.
     We have to do this now, as up until this point the destinations of
     JUMPs can be moved around and changed, but past this point that can
     no longer happen.  */
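  /* For example, a backward conditional branch judged likely taken might
     get a note such as (REG_BR_PRED (const_int <ATTR_FLAG_backward |
     ATTR_FLAG_likely>)); the exact flag values come from get_jump_flags.  */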
  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      int pred_flags;

      if (NONJUMP_INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == SEQUENCE)
	    insn = XVECEXP (pat, 0, 0);
	}
      if (!JUMP_P (insn))
	continue;

      pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
      add_reg_note (insn, REG_BR_PRED, GEN_INT (pred_flags));
    }
  free_resource_info ();
  free (uid_to_ruid);
#ifdef DELAY_SLOTS_FOR_EPILOGUE
  /* Some assemblers, the SPARC assembler for instance, emit a warning
     when debug info is output into a delay slot, so clear the locators
     of the epilogue delay insns.  */
  {
    rtx link;

    for (link = crtl->epilogue_delay_list;
	 link;
	 link = XEXP (link, 1))
      INSN_LOCATOR (XEXP (link, 0)) = 0;
  }

#endif
  crtl->dbr_scheduled_p = true;
}
#endif /* DELAY_SLOTS */
\f
static bool
gate_handle_delay_slots (void)
{
#ifdef DELAY_SLOTS
  /* At -O0, dataflow info isn't updated after register allocation.  */
  return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
#else
  return 0;
#endif
}

/* Run delay slot optimization.  */
static unsigned int
rest_of_handle_delay_slots (void)
{
#ifdef DELAY_SLOTS
  dbr_schedule (get_insns ());
#endif
  return 0;
}

struct rtl_opt_pass pass_delay_slots =
{
 {
  RTL_PASS,
  "dbr",                                /* name */
  gate_handle_delay_slots,              /* gate */
  rest_of_handle_delay_slots,           /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_DBR_SCHED,                         /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect                      /* todo_flags_finish */
 }
};

/* Machine dependent reorg pass.  */
static bool
gate_handle_machine_reorg (void)
{
  return targetm.machine_dependent_reorg != 0;
}

static unsigned int
rest_of_handle_machine_reorg (void)
{
  targetm.machine_dependent_reorg ();
  return 0;
}

struct rtl_opt_pass pass_machine_reorg =
{
 {
  RTL_PASS,
  "mach",                               /* name */
  gate_handle_machine_reorg,            /* gate */
  rest_of_handle_machine_reorg,         /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_MACH_DEP,                          /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect                      /* todo_flags_finish */
 }
};