/* If-conversion support.
   Copyright (C) 2000-2017 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"

#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "output.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "shrink-wrap.h"
#include "rtl-iter.h"
#include "ifcvt.h"
#include "params.h"

#ifndef MAX_CONDITIONAL_EXECUTE
#define MAX_CONDITIONAL_EXECUTE \
  (BRANCH_COST (optimize_function_for_speed_p (cfun), false) \
   + 1)
#endif
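
/* For example, on a target whose BRANCH_COST is 3 when optimizing for
   speed, the default above permits at most 3 + 1 = 4 insns in a block
   to be conditionally executed (the figures are illustrative, not taken
   from any particular target).  */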

#define IFCVT_MULTIPLE_DUMPS 1

#define NULL_BLOCK  ((basic_block) NULL)

/* True if after combine pass.  */
static bool ifcvt_after_combine;

/* True if the target has the cbranchcc4 optab.  */
static bool have_cbranchcc4;

/* # of IF-THEN or IF-THEN-ELSE blocks we looked at.  */
static int num_possible_if_blocks;

/* # of IF-THEN or IF-THEN-ELSE blocks that were converted to conditional
   execution.  */
static int num_updated_if_blocks;

/* # of changes made.  */
static int num_true_changes;

/* Whether conditional execution changes were made.  */
static int cond_exec_changed_p;

/* Forward references.  */
static int count_bb_insns (const_basic_block);
static bool cheap_bb_rtx_cost_p (const_basic_block, int, int);
static rtx_insn *first_active_insn (basic_block);
static rtx_insn *last_active_insn (basic_block, int);
static rtx_insn *find_active_insn_before (basic_block, rtx_insn *);
static rtx_insn *find_active_insn_after (basic_block, rtx_insn *);
static basic_block block_fallthru (basic_block);
static int cond_exec_process_insns (ce_if_block *, rtx_insn *, rtx, rtx, int,
                                    int);
static rtx cond_exec_get_condition (rtx_insn *);
static rtx noce_get_condition (rtx_insn *, rtx_insn **, bool);
static int noce_operand_ok (const_rtx);
static void merge_if_block (ce_if_block *);
static int find_cond_trap (basic_block, edge, edge);
static basic_block find_if_header (basic_block, int);
static int block_jumps_and_fallthru_p (basic_block, basic_block);
static int noce_find_if_block (basic_block, edge, edge, int);
static int cond_exec_find_if_block (ce_if_block *);
static int find_if_case_1 (basic_block, edge, edge);
static int find_if_case_2 (basic_block, edge, edge);
static int dead_or_predicable (basic_block, basic_block, basic_block,
                               edge, int);
static void noce_emit_move_insn (rtx, rtx);
static rtx_insn *block_has_only_trap (basic_block);
\f
/* Count the number of non-jump active insns in BB.  */

static int
count_bb_insns (const_basic_block bb)
{
  int count = 0;
  rtx_insn *insn = BB_HEAD (bb);

  while (1)
    {
      if (active_insn_p (insn) && !JUMP_P (insn))
        count++;

      if (insn == BB_END (bb))
        break;
      insn = NEXT_INSN (insn);
    }

  return count;
}

/* Determine whether the total insn_rtx_cost on non-jump insns in
   basic block BB is less than MAX_COST.  This function returns
   false if the cost of any instruction could not be estimated.

   The cost of the non-jump insns in BB is scaled by REG_BR_PROB_BASE
   as those insns are being speculated.  MAX_COST is scaled with SCALE
   plus a small fudge factor.  */
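
/* As a worked example (assuming REG_BR_PROB_BASE == 10000, its current
   value in GCC): with SCALE == 5000, i.e. roughly even odds that BB
   executes, and MAX_COST == 4, the budget below becomes
   4 * (5000 + 1250) = 25000, so two speculated insns of cost 1 each
   (2 * 10000 = 20000) fit, while three do not.  */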

static bool
cheap_bb_rtx_cost_p (const_basic_block bb, int scale, int max_cost)
{
  int count = 0;
  rtx_insn *insn = BB_HEAD (bb);
  bool speed = optimize_bb_for_speed_p (bb);

  /* Set scale to REG_BR_PROB_BASE to avoid the identical scaling
     applied to insn_rtx_cost when optimizing for size.  Only do
     this after combine because if-conversion might interfere with
     passes before combine.

     Use optimize_function_for_speed_p instead of the pre-defined
     variable speed to make sure it is set to the same value for all
     basic blocks in one if-conversion transformation.  */
  if (!optimize_function_for_speed_p (cfun) && ifcvt_after_combine)
    scale = REG_BR_PROB_BASE;
  /* Our branch probability/scaling factors are just estimates and don't
     account for cases where we can get speculation for free and other
     secondary benefits.  So we fudge the scale factor to make speculating
     appear a little more profitable when optimizing for performance.  */
  else
    scale += REG_BR_PROB_BASE / 8;

  max_cost *= scale;

  while (1)
    {
      if (NONJUMP_INSN_P (insn))
        {
          int cost = insn_rtx_cost (PATTERN (insn), speed) * REG_BR_PROB_BASE;
          if (cost == 0)
            return false;

          /* If this instruction is the load or set of a "stack" register,
             such as a floating point register on x87, then the cost of
             speculatively executing this insn may need to include
             the additional cost of popping its result off of the
             register stack.  Unfortunately, correctly recognizing and
             accounting for this additional overhead is tricky, so for
             now we simply prohibit such speculative execution.  */
#ifdef STACK_REGS
          {
            rtx set = single_set (insn);
            if (set && STACK_REG_P (SET_DEST (set)))
              return false;
          }
#endif

          count += cost;
          if (count >= max_cost)
            return false;
        }
      else if (CALL_P (insn))
        return false;

      if (insn == BB_END (bb))
        break;
      insn = NEXT_INSN (insn);
    }

  return true;
}

/* Return the first non-jump active insn in the basic block.  */

static rtx_insn *
first_active_insn (basic_block bb)
{
  rtx_insn *insn = BB_HEAD (bb);

  if (LABEL_P (insn))
    {
      if (insn == BB_END (bb))
        return NULL;
      insn = NEXT_INSN (insn);
    }

  while (NOTE_P (insn) || DEBUG_INSN_P (insn))
    {
      if (insn == BB_END (bb))
        return NULL;
      insn = NEXT_INSN (insn);
    }

  if (JUMP_P (insn))
    return NULL;

  return insn;
}

/* Return the last active non-jump insn in the basic block.  */

static rtx_insn *
last_active_insn (basic_block bb, int skip_use_p)
{
  rtx_insn *insn = BB_END (bb);
  rtx_insn *head = BB_HEAD (bb);

  while (NOTE_P (insn)
         || JUMP_P (insn)
         || DEBUG_INSN_P (insn)
         || (skip_use_p
             && NONJUMP_INSN_P (insn)
             && GET_CODE (PATTERN (insn)) == USE))
    {
      if (insn == head)
        return NULL;
      insn = PREV_INSN (insn);
    }

  if (LABEL_P (insn))
    return NULL;

  return insn;
}

/* Return the active insn before INSN inside basic block CURR_BB.  */

static rtx_insn *
find_active_insn_before (basic_block curr_bb, rtx_insn *insn)
{
  if (!insn || insn == BB_HEAD (curr_bb))
    return NULL;

  while ((insn = PREV_INSN (insn)) != NULL_RTX)
    {
      if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
        break;

      /* No other active insn all the way to the start of the basic block.  */
      if (insn == BB_HEAD (curr_bb))
        return NULL;
    }

  return insn;
}

/* Return the active insn after INSN inside basic block CURR_BB.  */

static rtx_insn *
find_active_insn_after (basic_block curr_bb, rtx_insn *insn)
{
  if (!insn || insn == BB_END (curr_bb))
    return NULL;

  while ((insn = NEXT_INSN (insn)) != NULL_RTX)
    {
      if (NONJUMP_INSN_P (insn) || JUMP_P (insn) || CALL_P (insn))
        break;

      /* No other active insn all the way to the end of the basic block.  */
      if (insn == BB_END (curr_bb))
        return NULL;
    }

  return insn;
}

/* Return the basic block reached by falling through the basic block BB.  */

static basic_block
block_fallthru (basic_block bb)
{
  edge e = find_fallthru_edge (bb->succs);

  return (e) ? e->dest : NULL_BLOCK;
}

/* Return true if RTXs A and B can be safely interchanged.  */

static bool
rtx_interchangeable_p (const_rtx a, const_rtx b)
{
  if (!rtx_equal_p (a, b))
    return false;

  if (GET_CODE (a) != MEM)
    return true;

  /* A dead type-unsafe memory reference is legal, but a live type-unsafe memory
     reference is not.  Interchanging a dead type-unsafe memory reference with
     a live type-safe one creates a live type-unsafe memory reference, in other
     words, it makes the program illegal.
     We check here conservatively whether the two memory references have equal
     memory attributes.  */

  return mem_attrs_eq_p (get_mem_attrs (a), get_mem_attrs (b));
}

\f
/* Go through a bunch of insns, converting them to conditional
   execution format if possible.  Return TRUE if all of the non-note
   insns were processed.  */
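
/* For illustration (a sketch, not from any particular target): an insn
       (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 1)))
   predicated on TEST (ne (reg:CC cc) (const_int 0)) is rewritten below,
   via gen_rtx_COND_EXEC, into
       (cond_exec (ne (reg:CC cc) (const_int 0))
         (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 1)))).  */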

static int
cond_exec_process_insns (ce_if_block *ce_info ATTRIBUTE_UNUSED,
                         /* if block information */rtx_insn *start,
                         /* first insn to look at */rtx end,
                         /* last insn to look at */rtx test,
                         /* conditional execution test */int prob_val,
                         /* probability of branch taken. */int mod_ok)
{
  int must_be_last = FALSE;
  rtx_insn *insn;
  rtx xtest;
  rtx pattern;

  if (!start || !end)
    return FALSE;

  for (insn = start; ; insn = NEXT_INSN (insn))
    {
      /* dwarf2out can't cope with conditional prologues.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)
        return FALSE;

      if (NOTE_P (insn) || DEBUG_INSN_P (insn))
        goto insn_done;

      gcc_assert (NONJUMP_INSN_P (insn) || CALL_P (insn));

      /* dwarf2out can't cope with conditional unwind info.  */
      if (RTX_FRAME_RELATED_P (insn))
        return FALSE;

      /* Remove USE insns that get in the way.  */
      if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
        {
          /* ??? Ug.  Actually unlinking the thing is problematic,
             given what we'd have to coordinate with our callers.  */
          SET_INSN_DELETED (insn);
          goto insn_done;
        }

      /* Last insn wasn't last?  */
      if (must_be_last)
        return FALSE;

      if (modified_in_p (test, insn))
        {
          if (!mod_ok)
            return FALSE;
          must_be_last = TRUE;
        }

      /* Now build the conditional form of the instruction.  */
      pattern = PATTERN (insn);
      xtest = copy_rtx (test);

      /* If this is already a COND_EXEC, rewrite the test to be an AND of the
         two conditions.  */
      if (GET_CODE (pattern) == COND_EXEC)
        {
          if (GET_MODE (xtest) != GET_MODE (COND_EXEC_TEST (pattern)))
            return FALSE;

          xtest = gen_rtx_AND (GET_MODE (xtest), xtest,
                               COND_EXEC_TEST (pattern));
          pattern = COND_EXEC_CODE (pattern);
        }

      pattern = gen_rtx_COND_EXEC (VOIDmode, xtest, pattern);

      /* If the machine needs to modify the insn being conditionally executed,
         say for example to force a constant integer operand into a temp
         register, do so here.  */
#ifdef IFCVT_MODIFY_INSN
      IFCVT_MODIFY_INSN (ce_info, pattern, insn);
      if (! pattern)
        return FALSE;
#endif

      validate_change (insn, &PATTERN (insn), pattern, 1);

      if (CALL_P (insn) && prob_val >= 0)
        validate_change (insn, &REG_NOTES (insn),
                         gen_rtx_INT_LIST ((machine_mode) REG_BR_PROB,
                                           prob_val, REG_NOTES (insn)), 1);

    insn_done:
      if (insn == end)
        break;
    }

  return TRUE;
}

/* Return the condition for a jump.  Do not do any special processing.  */

static rtx
cond_exec_get_condition (rtx_insn *jump)
{
  rtx test_if, cond;

  if (any_condjump_p (jump))
    test_if = SET_SRC (pc_set (jump));
  else
    return NULL_RTX;
  cond = XEXP (test_if, 0);

  /* If this branches to JUMP_LABEL when the condition is false,
     reverse the condition.  */
  if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF
      && label_ref_label (XEXP (test_if, 2)) == JUMP_LABEL (jump))
    {
      enum rtx_code rev = reversed_comparison_code (cond, jump);
      if (rev == UNKNOWN)
        return NULL_RTX;

      cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
                             XEXP (cond, 1));
    }

  return cond;
}

/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
   to conditional execution.  Return TRUE if we were successful at
   converting the block.  */

static int
cond_exec_process_if_block (ce_if_block * ce_info,
                            /* if block information */int do_multiple_p)
{
  basic_block test_bb = ce_info->test_bb;  /* last test block */
  basic_block then_bb = ce_info->then_bb;  /* THEN */
  basic_block else_bb = ce_info->else_bb;  /* ELSE or NULL */
  rtx test_expr;                /* expression in IF_THEN_ELSE that is tested */
  rtx_insn *then_start;         /* first insn in THEN block */
  rtx_insn *then_end;           /* last insn + 1 in THEN block */
  rtx_insn *else_start = NULL;  /* first insn in ELSE block or NULL */
  rtx_insn *else_end = NULL;    /* last insn + 1 in ELSE block */
  int max;                      /* max # of insns to convert.  */
  int then_mod_ok;              /* whether conditional mods are ok in THEN */
  rtx true_expr;                /* test for else block insns */
  rtx false_expr;               /* test for then block insns */
  int true_prob_val;            /* probability of else block */
  int false_prob_val;           /* probability of then block */
  rtx_insn *then_last_head = NULL;   /* Last match at the head of THEN */
  rtx_insn *else_last_head = NULL;   /* Last match at the head of ELSE */
  rtx_insn *then_first_tail = NULL;  /* First match at the tail of THEN */
  rtx_insn *else_first_tail = NULL;  /* First match at the tail of ELSE */
  int then_n_insns, else_n_insns, n_insns;
  enum rtx_code false_code;
  rtx note;

  /* If test is comprised of && or || elements, and we've failed at handling
     all of them together, just use the last test if it is the special case of
     && elements without an ELSE block.  */
  if (!do_multiple_p && ce_info->num_multiple_test_blocks)
    {
      if (else_bb || ! ce_info->and_and_p)
        return FALSE;

      ce_info->test_bb = test_bb = ce_info->last_test_bb;
      ce_info->num_multiple_test_blocks = 0;
      ce_info->num_and_and_blocks = 0;
      ce_info->num_or_or_blocks = 0;
    }

  /* Find the conditional jump to the ELSE or JOIN part, and isolate
     the test.  */
  test_expr = cond_exec_get_condition (BB_END (test_bb));
  if (! test_expr)
    return FALSE;

  /* If the conditional jump is more than just a conditional jump,
     then we cannot do conditional execution conversion on this block.  */
  if (! onlyjump_p (BB_END (test_bb)))
    return FALSE;

  /* Collect the bounds of where we're to search, skipping any labels, jumps
     and notes at the beginning and end of the block.  Then count the total
     number of insns and see if it is small enough to convert.  */
  then_start = first_active_insn (then_bb);
  then_end = last_active_insn (then_bb, TRUE);
  then_n_insns = ce_info->num_then_insns = count_bb_insns (then_bb);
  n_insns = then_n_insns;
  max = MAX_CONDITIONAL_EXECUTE;

  if (else_bb)
    {
      int n_matching;

      max *= 2;
      else_start = first_active_insn (else_bb);
      else_end = last_active_insn (else_bb, TRUE);
      else_n_insns = ce_info->num_else_insns = count_bb_insns (else_bb);
      n_insns += else_n_insns;

      /* Look for matching sequences at the head and tail of the two blocks,
         and limit the range of insns to be converted if possible.  */
      n_matching = flow_find_cross_jump (then_bb, else_bb,
                                         &then_first_tail, &else_first_tail,
                                         NULL);
      if (then_first_tail == BB_HEAD (then_bb))
        then_start = then_end = NULL;
      if (else_first_tail == BB_HEAD (else_bb))
        else_start = else_end = NULL;

      if (n_matching > 0)
        {
          if (then_end)
            then_end = find_active_insn_before (then_bb, then_first_tail);
          if (else_end)
            else_end = find_active_insn_before (else_bb, else_first_tail);
          n_insns -= 2 * n_matching;
        }

      if (then_start
          && else_start
          && then_n_insns > n_matching
          && else_n_insns > n_matching)
        {
          int longest_match = MIN (then_n_insns - n_matching,
                                   else_n_insns - n_matching);
          n_matching
            = flow_find_head_matching_sequence (then_bb, else_bb,
                                                &then_last_head,
                                                &else_last_head,
                                                longest_match);

          if (n_matching > 0)
            {
              rtx_insn *insn;

              /* We won't pass the insns in the head sequence to
                 cond_exec_process_insns, so we need to test them here
                 to make sure that they don't clobber the condition.  */
              for (insn = BB_HEAD (then_bb);
                   insn != NEXT_INSN (then_last_head);
                   insn = NEXT_INSN (insn))
                if (!LABEL_P (insn) && !NOTE_P (insn)
                    && !DEBUG_INSN_P (insn)
                    && modified_in_p (test_expr, insn))
                  return FALSE;
            }

          if (then_last_head == then_end)
            then_start = then_end = NULL;
          if (else_last_head == else_end)
            else_start = else_end = NULL;

          if (n_matching > 0)
            {
              if (then_start)
                then_start = find_active_insn_after (then_bb, then_last_head);
              if (else_start)
                else_start = find_active_insn_after (else_bb, else_last_head);
              n_insns -= 2 * n_matching;
            }
        }
    }

  if (n_insns > max)
    return FALSE;

  /* Map test_expr/test_jump into the appropriate MD tests to use on
     the conditionally executed code.  */

  true_expr = test_expr;

  false_code = reversed_comparison_code (true_expr, BB_END (test_bb));
  if (false_code != UNKNOWN)
    false_expr = gen_rtx_fmt_ee (false_code, GET_MODE (true_expr),
                                 XEXP (true_expr, 0), XEXP (true_expr, 1));
  else
    false_expr = NULL_RTX;

#ifdef IFCVT_MODIFY_TESTS
  /* If the machine description needs to modify the tests, such as setting a
     conditional execution register from a comparison, it can do so here.  */
  IFCVT_MODIFY_TESTS (ce_info, true_expr, false_expr);

  /* See if the conversion failed.  */
  if (!true_expr || !false_expr)
    goto fail;
#endif

  note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX);
  if (note)
    {
      true_prob_val = XINT (note, 0);
      false_prob_val = REG_BR_PROB_BASE - true_prob_val;
    }
  else
    {
      true_prob_val = -1;
      false_prob_val = -1;
    }

  /* If we have && or || tests, do them here.  These tests are in the adjacent
     blocks after the first block containing the test.  */
  if (ce_info->num_multiple_test_blocks > 0)
    {
      basic_block bb = test_bb;
      basic_block last_test_bb = ce_info->last_test_bb;

      if (! false_expr)
        goto fail;

      do
        {
          rtx_insn *start, *end;
          rtx t, f;
          enum rtx_code f_code;

          bb = block_fallthru (bb);
          start = first_active_insn (bb);
          end = last_active_insn (bb, TRUE);
          if (start
              && ! cond_exec_process_insns (ce_info, start, end, false_expr,
                                            false_prob_val, FALSE))
            goto fail;

          /* If the conditional jump is more than just a conditional jump, then
             we cannot do conditional execution conversion on this block.  */
          if (! onlyjump_p (BB_END (bb)))
            goto fail;

          /* Find the conditional jump and isolate the test.  */
          t = cond_exec_get_condition (BB_END (bb));
          if (! t)
            goto fail;

          f_code = reversed_comparison_code (t, BB_END (bb));
          if (f_code == UNKNOWN)
            goto fail;

          f = gen_rtx_fmt_ee (f_code, GET_MODE (t), XEXP (t, 0), XEXP (t, 1));
          if (ce_info->and_and_p)
            {
              t = gen_rtx_AND (GET_MODE (t), true_expr, t);
              f = gen_rtx_IOR (GET_MODE (t), false_expr, f);
            }
          else
            {
              t = gen_rtx_IOR (GET_MODE (t), true_expr, t);
              f = gen_rtx_AND (GET_MODE (t), false_expr, f);
            }

          /* If the machine description needs to modify the tests, such as
             setting a conditional execution register from a comparison, it can
             do so here.  */
#ifdef IFCVT_MODIFY_MULTIPLE_TESTS
          IFCVT_MODIFY_MULTIPLE_TESTS (ce_info, bb, t, f);

          /* See if the conversion failed.  */
          if (!t || !f)
            goto fail;
#endif

          true_expr = t;
          false_expr = f;
        }
      while (bb != last_test_bb);
    }

  /* For IF-THEN-ELSE blocks, we don't allow modifications of the test
     on the THEN block.  */
  then_mod_ok = (else_bb == NULL_BLOCK);

  /* Go through the THEN and ELSE blocks converting the insns if possible
     to conditional execution.  */

  if (then_end
      && (! false_expr
          || ! cond_exec_process_insns (ce_info, then_start, then_end,
                                        false_expr, false_prob_val,
                                        then_mod_ok)))
    goto fail;

  if (else_bb && else_end
      && ! cond_exec_process_insns (ce_info, else_start, else_end,
                                    true_expr, true_prob_val, TRUE))
    goto fail;

  /* If we cannot apply the changes, fail.  Do not go through the normal fail
     processing, since apply_change_group will call cancel_changes.  */
  if (! apply_change_group ())
    {
#ifdef IFCVT_MODIFY_CANCEL
      /* Cancel any machine dependent changes.  */
      IFCVT_MODIFY_CANCEL (ce_info);
#endif
      return FALSE;
    }

#ifdef IFCVT_MODIFY_FINAL
  /* Do any machine dependent final modifications.  */
  IFCVT_MODIFY_FINAL (ce_info);
#endif

  /* Conversion succeeded.  */
  if (dump_file)
    fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
             n_insns, (n_insns == 1) ? " was" : "s were");

  /* Merge the blocks!  If we had matching sequences, make sure to delete one
     copy at the appropriate location first: delete the copy in the THEN branch
     for a tail sequence so that the remaining one is executed last for both
     branches, and delete the copy in the ELSE branch for a head sequence so
     that the remaining one is executed first for both branches.  */
  if (then_first_tail)
    {
      rtx_insn *from = then_first_tail;
      if (!INSN_P (from))
        from = find_active_insn_after (then_bb, from);
      delete_insn_chain (from, get_last_bb_insn (then_bb), false);
    }
  if (else_last_head)
    delete_insn_chain (first_active_insn (else_bb), else_last_head, false);

  merge_if_block (ce_info);
  cond_exec_changed_p = TRUE;
  return TRUE;

 fail:
#ifdef IFCVT_MODIFY_CANCEL
  /* Cancel any machine dependent changes.  */
  IFCVT_MODIFY_CANCEL (ce_info);
#endif

  cancel_changes (0);
  return FALSE;
}
\f
/* Used by noce_process_if_block to communicate with its subroutines.

   The subroutines know that A and B may be evaluated freely.  They
   know that X is a register.  They should insert new instructions
   before cond_earliest.  */

struct noce_if_info
{
  /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block.  */
  basic_block test_bb, then_bb, else_bb, join_bb;

  /* The jump that ends TEST_BB.  */
  rtx_insn *jump;

  /* The jump condition.  */
  rtx cond;

  /* New insns should be inserted before this one.  */
  rtx_insn *cond_earliest;

  /* Insns in the THEN and ELSE block.  There is always just this
     one insn in those blocks.  The insns are single_set insns.
     If there was no ELSE block, INSN_B is the last insn before
     COND_EARLIEST, or NULL_RTX.  In the former case, the insn
     operands are still valid, as if INSN_B was moved down below
     the jump.  */
  rtx_insn *insn_a, *insn_b;

  /* The SET_SRC of INSN_A and INSN_B.  */
  rtx a, b;

  /* The SET_DEST of INSN_A.  */
  rtx x;

  /* The original set destination that the THEN and ELSE basic blocks finally
     write their result to.  */
  rtx orig_x;
  /* True if this if block is not canonical.  In the canonical form of
     if blocks, the THEN_BB is the block reached via the fallthru edge
     from TEST_BB.  For the noce transformations, we allow the symmetric
     form as well.  */
  bool then_else_reversed;

  /* True if the contents of then_bb and else_bb are a
     simple single set instruction.  */
  bool then_simple;
  bool else_simple;

  /* True if we're optimizing the control block for speed, false if
     we're optimizing for size.  */
  bool speed_p;

  /* An estimate of the original costs.  When optimizing for size, this is the
     combined cost of COND, JUMP and the costs for THEN_BB and ELSE_BB.
     When optimizing for speed, we use the costs of COND plus the minimum of
     the costs for THEN_BB and ELSE_BB, as computed in the next field.  */
  unsigned int original_cost;

  /* Maximum permissible cost for the unconditional sequence we should
     generate to replace this branch.  */
  unsigned int max_seq_cost;

  /* The name of the noce transform that succeeded in if-converting
     this structure.  Used for debugging.  */
  const char *transform_name;
};

static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
static int noce_try_move (struct noce_if_info *);
static int noce_try_ifelse_collapse (struct noce_if_info *);
static int noce_try_store_flag (struct noce_if_info *);
static int noce_try_addcc (struct noce_if_info *);
static int noce_try_store_flag_constants (struct noce_if_info *);
static int noce_try_store_flag_mask (struct noce_if_info *);
static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx,
                            rtx, rtx, rtx);
static int noce_try_cmove (struct noce_if_info *);
static int noce_try_cmove_arith (struct noce_if_info *);
static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx_insn **);
static int noce_try_minmax (struct noce_if_info *);
static int noce_try_abs (struct noce_if_info *);
static int noce_try_sign_mask (struct noce_if_info *);

/* Return TRUE if SEQ is a good candidate as a replacement for the
   if-convertible sequence described in IF_INFO.  */
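
/* A worked instance of the test below (numbers illustrative only): with
   original_cost == 12, a replacement sequence of cost 10 is always
   accepted; one of cost 15 is rejected outright when optimizing for
   size, and accepted when optimizing for speed only if it stays within
   max_seq_cost.  */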

inline static bool
noce_conversion_profitable_p (rtx_insn *seq, struct noce_if_info *if_info)
{
  bool speed_p = if_info->speed_p;

  /* Cost up the new sequence.  */
  unsigned int cost = seq_cost (seq, speed_p);

  if (cost <= if_info->original_cost)
    return true;

  /* When compiling for size, we can make a reasonably accurate guess
     at the size growth.  When compiling for speed, use the maximum.  */
  return speed_p && cost <= if_info->max_seq_cost;
}

/* Helper function for noce_try_store_flag*.  */

static rtx
noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
                      int normalize)
{
  rtx cond = if_info->cond;
  int cond_complex;
  enum rtx_code code;

  cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode)
                  || ! general_operand (XEXP (cond, 1), VOIDmode));

  /* If earliest == jump, or when the condition is complex, try to
     build the store_flag insn directly.  */

  if (cond_complex)
    {
      rtx set = pc_set (if_info->jump);
      cond = XEXP (SET_SRC (set), 0);
      if (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
          && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump))
        reversep = !reversep;
      if (if_info->then_else_reversed)
        reversep = !reversep;
    }

  if (reversep)
    code = reversed_comparison_code (cond, if_info->jump);
  else
    code = GET_CODE (cond);

  if ((if_info->cond_earliest == if_info->jump || cond_complex)
      && (normalize == 0 || STORE_FLAG_VALUE == normalize))
    {
      rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
                                XEXP (cond, 1));
      rtx set = gen_rtx_SET (x, src);

      start_sequence ();
      rtx_insn *insn = emit_insn (set);

      if (recog_memoized (insn) >= 0)
        {
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn (seq);

          if_info->cond_earliest = if_info->jump;

          return x;
        }

      end_sequence ();
    }

  /* Don't even try if the comparison operands or the mode of X are weird.  */
  if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return NULL_RTX;

  return emit_store_flag (x, code, XEXP (cond, 0),
                          XEXP (cond, 1), VOIDmode,
                          (code == LTU || code == LEU
                           || code == GEU || code == GTU), normalize);
}

/* Emit instruction to move an rtx, possibly into STRICT_LOW_PART.
   X is the destination/target and Y is the value to copy.  */
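
/* For instance (an illustrative sketch): with
       X = (strict_low_part (subreg:HI (reg:SI 100) 0))
   the store_bit_field call at the bottom of this function writes Y into
   the 16 bits of reg 100 selected by the subreg, leaving the remaining
   bits alone.  */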

static void
noce_emit_move_insn (rtx x, rtx y)
{
  machine_mode outmode;
  rtx outer, inner;
  int bitpos;

  if (GET_CODE (x) != STRICT_LOW_PART)
    {
      rtx_insn *seq, *insn;
      rtx target;
      optab ot;

      start_sequence ();
      /* Check that the SET_SRC is reasonable before calling emit_move_insn,
         otherwise construct a suitable SET pattern ourselves.  */
      insn = (OBJECT_P (y) || CONSTANT_P (y) || GET_CODE (y) == SUBREG)
             ? emit_move_insn (x, y)
             : emit_insn (gen_rtx_SET (x, y));
      seq = get_insns ();
      end_sequence ();

      if (recog_memoized (insn) <= 0)
        {
          if (GET_CODE (x) == ZERO_EXTRACT)
            {
              rtx op = XEXP (x, 0);
              unsigned HOST_WIDE_INT size = INTVAL (XEXP (x, 1));
              unsigned HOST_WIDE_INT start = INTVAL (XEXP (x, 2));

              /* store_bit_field expects START to be relative to
                 BYTES_BIG_ENDIAN and adjusts this value for machines with
                 BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN.  In order to be able to
                 invoke store_bit_field again it is necessary to have the START
                 value from the first call.  */
              if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
                {
                  if (MEM_P (op))
                    start = BITS_PER_UNIT - start - size;
                  else
                    {
                      gcc_assert (REG_P (op));
                      start = BITS_PER_WORD - start - size;
                    }
                }

              gcc_assert (start < (MEM_P (op) ? BITS_PER_UNIT : BITS_PER_WORD));
              store_bit_field (op, size, start, 0, 0, GET_MODE (x), y, false);
              return;
            }

          switch (GET_RTX_CLASS (GET_CODE (y)))
            {
            case RTX_UNARY:
              ot = code_to_optab (GET_CODE (y));
              if (ot)
                {
                  start_sequence ();
                  target = expand_unop (GET_MODE (y), ot, XEXP (y, 0), x, 0);
                  if (target != NULL_RTX)
                    {
                      if (target != x)
                        emit_move_insn (x, target);
                      seq = get_insns ();
                    }
                  end_sequence ();
                }
              break;

            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
              ot = code_to_optab (GET_CODE (y));
              if (ot)
                {
                  start_sequence ();
                  target = expand_binop (GET_MODE (y), ot,
                                         XEXP (y, 0), XEXP (y, 1),
                                         x, 0, OPTAB_DIRECT);
                  if (target != NULL_RTX)
                    {
                      if (target != x)
                        emit_move_insn (x, target);
                      seq = get_insns ();
                    }
                  end_sequence ();
                }
              break;

            default:
              break;
            }
        }

      emit_insn (seq);
      return;
    }

  outer = XEXP (x, 0);
  inner = XEXP (outer, 0);
  outmode = GET_MODE (outer);
  bitpos = SUBREG_BYTE (outer) * BITS_PER_UNIT;
  store_bit_field (inner, GET_MODE_BITSIZE (outmode), bitpos,
                   0, 0, outmode, y, false);
}

/* Return the CC reg if it is used in COND.  */

static rtx
cc_in_cond (rtx cond)
{
  if (have_cbranchcc4 && cond
      && GET_MODE_CLASS (GET_MODE (XEXP (cond, 0))) == MODE_CC)
    return XEXP (cond, 0);

  return NULL_RTX;
}

/* Return the sequence of instructions generated by if conversion.  This
   function calls end_sequence () to end the current stream, and ensures
   that the instructions are unshared, recognizable non-jump insns.
   On failure, this function returns NULL.  */

static rtx_insn *
end_ifcvt_sequence (struct noce_if_info *if_info)
{
  rtx_insn *insn;
  rtx_insn *seq = get_insns ();
  rtx cc = cc_in_cond (if_info->cond);

  set_used_flags (if_info->x);
  set_used_flags (if_info->cond);
  set_used_flags (if_info->a);
  set_used_flags (if_info->b);

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    set_used_flags (insn);

  unshare_all_rtl_in_chain (seq);
  end_sequence ();

  /* Make sure that all of the instructions emitted are recognizable,
     and that we haven't introduced a new jump instruction.
     As an exercise for the reader, build a general mechanism that
     allows proper placement of required clobbers.  */
  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (JUMP_P (insn)
        || recog_memoized (insn) == -1
        /* Make sure new generated code does not clobber CC.  */
        || (cc && set_of (cc, insn)))
      return NULL;

  return seq;
}

/* Return true iff the THEN and ELSE basic blocks (if the latter exists)
   each consist of a single simple set instruction.  */

static bool
noce_simple_bbs (struct noce_if_info *if_info)
{
  if (!if_info->then_simple)
    return false;

  if (if_info->else_bb)
    return if_info->else_simple;

  return true;
}

/* Convert "if (a != b) x = a; else x = b" into "x = a" and
   "if (a == b) x = a; else x = b" into "x = b".  */

static int
noce_try_move (struct noce_if_info *if_info)
{
  rtx cond = if_info->cond;
  enum rtx_code code = GET_CODE (cond);
  rtx y;
  rtx_insn *seq;

  if (code != NE && code != EQ)
    return FALSE;

  if (!noce_simple_bbs (if_info))
    return FALSE;

  /* This optimization isn't valid if either A or B could be a NaN
     or a signed zero.  */
  if (HONOR_NANS (if_info->x)
      || HONOR_SIGNED_ZEROS (if_info->x))
    return FALSE;

  /* Check whether the operands of the comparison are A and B,
     in either order.  */
  if ((rtx_equal_p (if_info->a, XEXP (cond, 0))
       && rtx_equal_p (if_info->b, XEXP (cond, 1)))
      || (rtx_equal_p (if_info->a, XEXP (cond, 1))
          && rtx_equal_p (if_info->b, XEXP (cond, 0))))
    {
      if (!rtx_interchangeable_p (if_info->a, if_info->b))
        return FALSE;

      y = (code == EQ) ? if_info->a : if_info->b;

      /* Avoid generating the move if the source is the destination.  */
      if (! rtx_equal_p (if_info->x, y))
        {
          start_sequence ();
          noce_emit_move_insn (if_info->x, y);
          seq = end_ifcvt_sequence (if_info);
          if (!seq)
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
        }
      if_info->transform_name = "noce_try_move";
      return TRUE;
    }
  return FALSE;
}

/* Try forming an IF_THEN_ELSE (cond, b, a) and collapsing that
   through simplify_rtx.  Sometimes that can eliminate the IF_THEN_ELSE.
   If that is the case, emit the result into x.  */

static int
noce_try_ifelse_collapse (struct noce_if_info * if_info)
{
  if (!noce_simple_bbs (if_info))
    return FALSE;

  machine_mode mode = GET_MODE (if_info->x);
  rtx if_then_else = simplify_gen_ternary (IF_THEN_ELSE, mode, mode,
                                           if_info->cond, if_info->b,
                                           if_info->a);

  if (GET_CODE (if_then_else) == IF_THEN_ELSE)
    return FALSE;

  rtx_insn *seq;
  start_sequence ();
  noce_emit_move_insn (if_info->x, if_then_else);
  seq = end_ifcvt_sequence (if_info);
  if (!seq)
    return FALSE;

  emit_insn_before_setloc (seq, if_info->jump,
                           INSN_LOCATION (if_info->insn_a));

  if_info->transform_name = "noce_try_ifelse_collapse";
  return TRUE;
}

/* Convert "if (test) x = 1; else x = 0".

   Only try 0 and STORE_FLAG_VALUE here.  Other combinations will be
   tried in noce_try_store_flag_constants after noce_try_cmove has had
   a go at the conversion.  */

static int
noce_try_store_flag (struct noce_if_info *if_info)
{
  int reversep;
  rtx target;
  rtx_insn *seq;

  if (!noce_simple_bbs (if_info))
    return FALSE;

  if (CONST_INT_P (if_info->b)
      && INTVAL (if_info->b) == STORE_FLAG_VALUE
      && if_info->a == const0_rtx)
    reversep = 0;
  else if (if_info->b == const0_rtx
           && CONST_INT_P (if_info->a)
           && INTVAL (if_info->a) == STORE_FLAG_VALUE
           && (reversed_comparison_code (if_info->cond, if_info->jump)
               != UNKNOWN))
    reversep = 1;
  else
    return FALSE;

  start_sequence ();

  target = noce_emit_store_flag (if_info, if_info->x, reversep, 0);
  if (target)
    {
      if (target != if_info->x)
        noce_emit_move_insn (if_info->x, target);

      seq = end_ifcvt_sequence (if_info);
      if (! seq)
        return FALSE;

      emit_insn_before_setloc (seq, if_info->jump,
                               INSN_LOCATION (if_info->insn_a));
      if_info->transform_name = "noce_try_store_flag";
      return TRUE;
    }
  else
    {
      end_sequence ();
      return FALSE;
    }
}

/* Convert "if (test) x = -A; else x = A" into
   x = A; if (test) x = -x if the machine can do the
   conditional negate form of this cheaply.
   Try this before noce_try_cmove that will just load the
   immediates into two registers and do a conditional select
   between them.  If the target has a conditional negate or
   conditional invert operation we can save a potentially
   expensive constant synthesis.  */

static bool
noce_try_inverse_constants (struct noce_if_info *if_info)
{
  if (!noce_simple_bbs (if_info))
    return false;

  if (!CONST_INT_P (if_info->a)
      || !CONST_INT_P (if_info->b)
      || !REG_P (if_info->x))
    return false;

  machine_mode mode = GET_MODE (if_info->x);

  HOST_WIDE_INT val_a = INTVAL (if_info->a);
  HOST_WIDE_INT val_b = INTVAL (if_info->b);

  rtx cond = if_info->cond;

  rtx x = if_info->x;
  rtx target;

  start_sequence ();

  rtx_code code;
  if (val_b != HOST_WIDE_INT_MIN && val_a == -val_b)
    code = NEG;
  else if (val_a == ~val_b)
    code = NOT;
  else
    {
      end_sequence ();
      return false;
    }

  rtx tmp = gen_reg_rtx (mode);
  noce_emit_move_insn (tmp, if_info->a);

  target = emit_conditional_neg_or_complement (x, code, mode, cond, tmp, tmp);

  if (target)
    {
      rtx_insn *seq = get_insns ();

      if (!seq)
        {
          end_sequence ();
          return false;
        }

      if (target != if_info->x)
        noce_emit_move_insn (if_info->x, target);

      seq = end_ifcvt_sequence (if_info);

      if (!seq)
        return false;

      emit_insn_before_setloc (seq, if_info->jump,
                               INSN_LOCATION (if_info->insn_a));
      if_info->transform_name = "noce_try_inverse_constants";
      return true;
    }

  end_sequence ();
  return false;
}

/* Convert "if (test) x = a; else x = b", for A and B constant.
   Also allow A = y + c1, B = y + c2, with a common y between A
   and B.  */

static int
noce_try_store_flag_constants (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  bool reversep;
  HOST_WIDE_INT itrue, ifalse, diff, tmp;
  int normalize;
  bool can_reverse;
  machine_mode mode = GET_MODE (if_info->x);
  rtx common = NULL_RTX;

  rtx a = if_info->a;
  rtx b = if_info->b;

  /* Handle cases like x := test ? y + 3 : y + 4.  */
  if (GET_CODE (a) == PLUS
      && GET_CODE (b) == PLUS
      && CONST_INT_P (XEXP (a, 1))
      && CONST_INT_P (XEXP (b, 1))
      && rtx_equal_p (XEXP (a, 0), XEXP (b, 0))
      /* Allow expressions that are not using the result or plain
         registers where we handle overlap below.  */
      && (REG_P (XEXP (a, 0))
          || (noce_operand_ok (XEXP (a, 0))
              && ! reg_overlap_mentioned_p (if_info->x, XEXP (a, 0)))))
    {
      common = XEXP (a, 0);
      a = XEXP (a, 1);
      b = XEXP (b, 1);
    }

  if (!noce_simple_bbs (if_info))
    return FALSE;

  if (CONST_INT_P (a)
      && CONST_INT_P (b))
    {
      ifalse = INTVAL (a);
      itrue = INTVAL (b);
      bool subtract_flag_p = false;

      diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
      /* Make sure we can represent the difference between the two values.  */
      if ((diff > 0)
          != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
        return FALSE;

      diff = trunc_int_for_mode (diff, mode);

      can_reverse = (reversed_comparison_code (if_info->cond, if_info->jump)
                     != UNKNOWN);

      reversep = false;
      if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
        {
          normalize = 0;
          /* We could collapse these cases but it is easier to follow the
             diff/STORE_FLAG_VALUE combinations when they are listed
             explicitly.  */

          /* test ? 3 : 4
             => 4 + (test != 0).  */
          if (diff < 0 && STORE_FLAG_VALUE < 0)
            reversep = false;
          /* test ? 4 : 3
             => can_reverse  | 4 + (test == 0)
                !can_reverse | 3 - (test != 0).  */
          else if (diff > 0 && STORE_FLAG_VALUE < 0)
            {
              reversep = can_reverse;
              subtract_flag_p = !can_reverse;
              /* If we need to subtract the flag and we have PLUS-immediate
                 A and B then it is unlikely to be beneficial to play tricks
                 here.  */
              if (subtract_flag_p && common)
                return FALSE;
            }
          /* test ? 3 : 4
             => can_reverse  | 3 + (test == 0)
                !can_reverse | 4 - (test != 0).  */
          else if (diff < 0 && STORE_FLAG_VALUE > 0)
            {
              reversep = can_reverse;
              subtract_flag_p = !can_reverse;
              /* If we need to subtract the flag and we have PLUS-immediate
                 A and B then it is unlikely to be beneficial to play tricks
                 here.  */
              if (subtract_flag_p && common)
                return FALSE;
            }
          /* test ? 4 : 3
             => 4 + (test != 0).  */
          else if (diff > 0 && STORE_FLAG_VALUE > 0)
            reversep = false;
          else
            gcc_unreachable ();
        }
      /* Is this (cond) ? 2^n : 0?  */
      else if (ifalse == 0 && pow2p_hwi (itrue)
               && STORE_FLAG_VALUE == 1)
        normalize = 1;
      /* Is this (cond) ? 0 : 2^n?  */
      else if (itrue == 0 && pow2p_hwi (ifalse) && can_reverse
               && STORE_FLAG_VALUE == 1)
        {
          normalize = 1;
          reversep = true;
        }
      /* Is this (cond) ? -1 : x?  */
      else if (itrue == -1
               && STORE_FLAG_VALUE == -1)
        normalize = -1;
      /* Is this (cond) ? x : -1?  */
      else if (ifalse == -1 && can_reverse
               && STORE_FLAG_VALUE == -1)
        {
          normalize = -1;
          reversep = true;
        }
      else
        return FALSE;

      if (reversep)
        {
          std::swap (itrue, ifalse);
          diff = trunc_int_for_mode (-(unsigned HOST_WIDE_INT) diff, mode);
        }

      start_sequence ();

      /* If we have x := test ? x + 3 : x + 4 then move the original
         x out of the way while we store flags.  */
      if (common && rtx_equal_p (common, if_info->x))
        {
          common = gen_reg_rtx (mode);
          noce_emit_move_insn (common, if_info->x);
        }

      target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize);
      if (! target)
        {
          end_sequence ();
          return FALSE;
        }

      /* if (test) x = 3; else x = 4;
         =>   x = 3 + (test == 0);  */
      if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
        {
          /* Add the common part now.  This may allow combine to merge this
             with the store flag operation earlier into some sort of conditional
             increment/decrement if the target allows it.  */
          if (common)
            target = expand_simple_binop (mode, PLUS,
                                          target, common,
                                          target, 0, OPTAB_WIDEN);
          /* Always use ifalse here.  It was already swapped with itrue
             above when reversep is true.  */
          target = expand_simple_binop (mode, subtract_flag_p ? MINUS : PLUS,
                                        gen_int_mode (ifalse, mode), target,
                                        if_info->x, 0, OPTAB_WIDEN);
        }
      /* Other cases are not beneficial when the original A and B are PLUS
         expressions.  */
      else if (common)
        {
          end_sequence ();
          return FALSE;
        }
      /* if (test) x = 8; else x = 0;
         =>   x = (test != 0) << 3;  */
      else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0)
        {
          target = expand_simple_binop (mode, ASHIFT,
                                        target, GEN_INT (tmp), if_info->x, 0,
                                        OPTAB_WIDEN);
        }

      /* if (test) x = -1; else x = b;
         =>   x = -(test != 0) | b;  */
      else if (itrue == -1)
        {
          target = expand_simple_binop (mode, IOR,
                                        target, gen_int_mode (ifalse, mode),
                                        if_info->x, 0, OPTAB_WIDEN);
        }
      else
        {
          end_sequence ();
          return FALSE;
        }

      if (! target)
        {
          end_sequence ();
          return FALSE;
        }

      if (target != if_info->x)
        noce_emit_move_insn (if_info->x, target);

      seq = end_ifcvt_sequence (if_info);
      if (!seq || !noce_conversion_profitable_p (seq, if_info))
        return FALSE;

      emit_insn_before_setloc (seq, if_info->jump,
                               INSN_LOCATION (if_info->insn_a));
      if_info->transform_name = "noce_try_store_flag_constants";

      return TRUE;
    }

  return FALSE;
}

/* Convert "if (test) foo++" into "foo += (test != 0)", and
   similarly for "foo--".  */
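
/* E.g. with STORE_FLAG_VALUE == 1, "if (test) foo++" is rewritten as
   foo = foo + (test != 0), either through the addcc path below or, if
   no addcc pattern exists, through a separate store-flag and add
   (values illustrative).  */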

static int
noce_try_addcc (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  int subtract, normalize;

  if (!noce_simple_bbs (if_info))
    return FALSE;

  if (GET_CODE (if_info->a) == PLUS
      && rtx_equal_p (XEXP (if_info->a, 0), if_info->b)
      && (reversed_comparison_code (if_info->cond, if_info->jump)
          != UNKNOWN))
    {
      rtx cond = if_info->cond;
      enum rtx_code code = reversed_comparison_code (cond, if_info->jump);

      /* First try to use addcc pattern.  */
      if (general_operand (XEXP (cond, 0), VOIDmode)
          && general_operand (XEXP (cond, 1), VOIDmode))
        {
          start_sequence ();
          target = emit_conditional_add (if_info->x, code,
                                         XEXP (cond, 0),
                                         XEXP (cond, 1),
                                         VOIDmode,
                                         if_info->b,
                                         XEXP (if_info->a, 1),
                                         GET_MODE (if_info->x),
                                         (code == LTU || code == GEU
                                          || code == LEU || code == GTU));
          if (target)
            {
              if (target != if_info->x)
                noce_emit_move_insn (if_info->x, target);

              seq = end_ifcvt_sequence (if_info);
              if (!seq || !noce_conversion_profitable_p (seq, if_info))
                return FALSE;

              emit_insn_before_setloc (seq, if_info->jump,
                                       INSN_LOCATION (if_info->insn_a));
              if_info->transform_name = "noce_try_addcc";

              return TRUE;
            }
          end_sequence ();
        }

      /* If that fails, construct conditional increment or decrement using
         setcc.  We're changing a branch and an increment to a comparison and
         an ADD/SUB.  */
      if (XEXP (if_info->a, 1) == const1_rtx
          || XEXP (if_info->a, 1) == constm1_rtx)
        {
          start_sequence ();
          if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
            subtract = 0, normalize = 0;
          else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
            subtract = 1, normalize = 0;
          else
            subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));

          target = noce_emit_store_flag (if_info,
                                         gen_reg_rtx (GET_MODE (if_info->x)),
                                         1, normalize);

          if (target)
            target = expand_simple_binop (GET_MODE (if_info->x),
                                          subtract ? MINUS : PLUS,
                                          if_info->b, target, if_info->x,
                                          0, OPTAB_WIDEN);
          if (target)
            {
              if (target != if_info->x)
                noce_emit_move_insn (if_info->x, target);

              seq = end_ifcvt_sequence (if_info);
              if (!seq || !noce_conversion_profitable_p (seq, if_info))
                return FALSE;

              emit_insn_before_setloc (seq, if_info->jump,
                                       INSN_LOCATION (if_info->insn_a));
              if_info->transform_name = "noce_try_addcc";
              return TRUE;
            }
          end_sequence ();
        }
    }

  return FALSE;
}

/* Convert "if (test) x = 0;" to "x &= -(test == 0);"  */
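
/* E.g. when the test is false the mask -(test == 0) is all ones and the
   AND below leaves x unchanged; when the test is true the mask is zero
   and x is cleared, matching the original "if (test) x = 0;".  */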

static int
noce_try_store_flag_mask (struct noce_if_info *if_info)
{
  rtx target;
  rtx_insn *seq;
  int reversep;

  if (!noce_simple_bbs (if_info))
    return FALSE;

  reversep = 0;

  if ((if_info->a == const0_rtx
       && rtx_equal_p (if_info->b, if_info->x))
      || ((reversep = (reversed_comparison_code (if_info->cond,
                                                 if_info->jump)
                       != UNKNOWN))
          && if_info->b == const0_rtx
          && rtx_equal_p (if_info->a, if_info->x)))
    {
      start_sequence ();
      target = noce_emit_store_flag (if_info,
                                     gen_reg_rtx (GET_MODE (if_info->x)),
                                     reversep, -1);
      if (target)
        target = expand_simple_binop (GET_MODE (if_info->x), AND,
                                      if_info->x,
                                      target, if_info->x, 0,
                                      OPTAB_WIDEN);

      if (target)
        {
          if (target != if_info->x)
            noce_emit_move_insn (if_info->x, target);

          seq = end_ifcvt_sequence (if_info);
          if (!seq || !noce_conversion_profitable_p (seq, if_info))
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
          if_info->transform_name = "noce_try_store_flag_mask";

          return TRUE;
        }

      end_sequence ();
    }

  return FALSE;
}

/* Helper function for noce_try_cmove and noce_try_cmove_arith.  */

static rtx
noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
                 rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue)
{
  rtx target ATTRIBUTE_UNUSED;
  int unsignedp ATTRIBUTE_UNUSED;

  /* If earliest == jump, try to build the cmove insn directly.
     This is helpful when combine has created some complex condition
     (like for alpha's cmovlbs) that we can't hope to regenerate
     through the normal interface.  */

  if (if_info->cond_earliest == if_info->jump)
    {
      rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
      rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
                                               cond, vtrue, vfalse);
      rtx set = gen_rtx_SET (x, if_then_else);

      start_sequence ();
      rtx_insn *insn = emit_insn (set);

      if (recog_memoized (insn) >= 0)
        {
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn (seq);

          return x;
        }

      end_sequence ();
    }

  /* Don't even try if the comparison operands are weird, unless the
     target supports cbranchcc4 and we are comparing a CC-mode value
     against (const_int 0).  */
  if (! general_operand (cmp_a, GET_MODE (cmp_a))
      || ! general_operand (cmp_b, GET_MODE (cmp_b)))
    {
      if (!have_cbranchcc4
          || GET_MODE_CLASS (GET_MODE (cmp_a)) != MODE_CC
          || cmp_b != const0_rtx)
        return NULL_RTX;
    }

  unsignedp = (code == LTU || code == GEU
               || code == LEU || code == GTU);

  target = emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode,
                                  vtrue, vfalse, GET_MODE (x),
                                  unsignedp);
  if (target)
    return target;

  /* We might be faced with a situation like:

     x = (reg:M TARGET)
     vtrue = (subreg:M (reg:N VTRUE) BYTE)
     vfalse = (subreg:M (reg:N VFALSE) BYTE)

     We can't do a conditional move in mode M, but it's possible that we
     could do a conditional move in mode N instead and take a subreg of
     the result.

     If we can't create new pseudos, though, don't bother.  */
  if (reload_completed)
    return NULL_RTX;

  if (GET_CODE (vtrue) == SUBREG && GET_CODE (vfalse) == SUBREG)
    {
      rtx reg_vtrue = SUBREG_REG (vtrue);
      rtx reg_vfalse = SUBREG_REG (vfalse);
      unsigned int byte_vtrue = SUBREG_BYTE (vtrue);
      unsigned int byte_vfalse = SUBREG_BYTE (vfalse);
      rtx promoted_target;

      if (GET_MODE (reg_vtrue) != GET_MODE (reg_vfalse)
          || byte_vtrue != byte_vfalse
          || (SUBREG_PROMOTED_VAR_P (vtrue)
              != SUBREG_PROMOTED_VAR_P (vfalse))
          || (SUBREG_PROMOTED_GET (vtrue)
              != SUBREG_PROMOTED_GET (vfalse)))
        return NULL_RTX;

      promoted_target = gen_reg_rtx (GET_MODE (reg_vtrue));

      target = emit_conditional_move (promoted_target, code, cmp_a, cmp_b,
                                      VOIDmode, reg_vtrue, reg_vfalse,
                                      GET_MODE (reg_vtrue), unsignedp);
      /* Nope, couldn't do it in that mode either.  */
      if (!target)
        return NULL_RTX;

      target = gen_rtx_SUBREG (GET_MODE (vtrue), promoted_target, byte_vtrue);
      SUBREG_PROMOTED_VAR_P (target) = SUBREG_PROMOTED_VAR_P (vtrue);
      SUBREG_PROMOTED_SET (target, SUBREG_PROMOTED_GET (vtrue));
      emit_move_insn (x, target);
      return x;
    }
  else
    return NULL_RTX;
}

/* Try only simple constants and registers here.  More complex cases
   are handled in noce_try_cmove_arith after the noce_try_store_flag*
   transforms have had a go at the conversion.  */

static int
noce_try_cmove (struct noce_if_info *if_info)
{
  enum rtx_code code;
  rtx target;
  rtx_insn *seq;

  if (!noce_simple_bbs (if_info))
    return FALSE;

  if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode))
      && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode)))
    {
      start_sequence ();

      code = GET_CODE (if_info->cond);
      target = noce_emit_cmove (if_info, if_info->x, code,
                                XEXP (if_info->cond, 0),
                                XEXP (if_info->cond, 1),
                                if_info->a, if_info->b);

      if (target)
        {
          if (target != if_info->x)
            noce_emit_move_insn (if_info->x, target);

          seq = end_ifcvt_sequence (if_info);
          if (!seq)
            return FALSE;

          emit_insn_before_setloc (seq, if_info->jump,
                                   INSN_LOCATION (if_info->insn_a));
          if_info->transform_name = "noce_try_cmove";

          return TRUE;
        }
      /* If both a and b are constants try a last-ditch transformation:
         if (test) x = a; else x = b;
         =>   x = (-(test != 0) & (b - a)) + a;
         Try this only if the target-specific expansion above has failed.
         The target-specific expander may want to generate sequences that
         we don't know about, so give them a chance before trying this
         approach.  */
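      /* A worked instance with illustrative values a == 3 and b == 7:
         ifalse == 3, itrue == 7, diff == 4, and the emitted sequence
         computes x = (mask & 4) + 3, where mask is the -1/0 store-flag
         result, yielding 7 when the mask is -1 and 3 when it is 0.  */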
      else if (!targetm.have_conditional_execution ()
               && CONST_INT_P (if_info->a) && CONST_INT_P (if_info->b))
        {
          machine_mode mode = GET_MODE (if_info->x);
          HOST_WIDE_INT ifalse = INTVAL (if_info->a);
          HOST_WIDE_INT itrue = INTVAL (if_info->b);
          rtx target = noce_emit_store_flag (if_info, if_info->x, false, -1);
          if (!target)
            {
              end_sequence ();
              return FALSE;
            }

          HOST_WIDE_INT diff = (unsigned HOST_WIDE_INT) itrue - ifalse;
          /* Make sure we can represent the difference
             between the two values.  */
          if ((diff > 0)
              != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue))
            {
              end_sequence ();
              return FALSE;
            }

          diff = trunc_int_for_mode (diff, mode);
          target = expand_simple_binop (mode, AND,
                                        target, gen_int_mode (diff, mode),
                                        if_info->x, 0, OPTAB_WIDEN);
          if (target)
            target = expand_simple_binop (mode, PLUS,
                                          target, gen_int_mode (ifalse, mode),
                                          if_info->x, 0, OPTAB_WIDEN);
          if (target)
            {
              if (target != if_info->x)
                noce_emit_move_insn (if_info->x, target);

              seq = end_ifcvt_sequence (if_info);
              if (!seq || !noce_conversion_profitable_p (seq, if_info))
                return FALSE;

              emit_insn_before_setloc (seq, if_info->jump,
                                       INSN_LOCATION (if_info->insn_a));
              if_info->transform_name = "noce_try_cmove";
              return TRUE;
            }
          else
            {
              end_sequence ();
              return FALSE;
            }
        }
      else
        end_sequence ();
    }

  return FALSE;
}

/* Return true if X contains a condition code (CC) mode rtx.  */

static bool
contains_ccmode_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_MODE_CLASS (GET_MODE (*iter)) == MODE_CC)
      return true;

  return false;
}

/* Helper for bb_valid_for_noce_process_p.  Validate that
   the rtx insn INSN is a single set that does not set
   the condition code register CC and is in general valid for
   if-conversion.  */
1920
1921 static bool
1922 insn_valid_noce_process_p (rtx_insn *insn, rtx cc)
1923 {
1924 if (!insn
1925 || !NONJUMP_INSN_P (insn)
1926 || (cc && set_of (cc, insn)))
1927 return false;
1928
1929 rtx sset = single_set (insn);
1930
1931 /* Currently support only simple single sets in test_bb. */
1932 if (!sset
1933 || !noce_operand_ok (SET_DEST (sset))
1934 || contains_ccmode_rtx_p (SET_DEST (sset))
1935 || !noce_operand_ok (SET_SRC (sset)))
1936 return false;
1937
1938 return true;
1939 }
1940
1941
1942 /* Return true iff the registers that the insns in BB_A set do not get
1943 used in BB_B. If TO_RENAME is non-NULL then it is a location that will be
1944 renamed later by the caller and so conflicts on it should be ignored
1945 in this function. */
1946
1947 static bool
1948 bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename)
1949 {
1950 rtx_insn *a_insn;
1951 bitmap bba_sets = BITMAP_ALLOC (&reg_obstack);
1952
1953 df_ref def;
1954 df_ref use;
1955
1956 FOR_BB_INSNS (bb_a, a_insn)
1957 {
1958 if (!active_insn_p (a_insn))
1959 continue;
1960
1961 rtx sset_a = single_set (a_insn);
1962
1963 if (!sset_a)
1964 {
1965 BITMAP_FREE (bba_sets);
1966 return false;
1967 }
1968 /* Record all registers that BB_A sets. */
1969 FOR_EACH_INSN_DEF (def, a_insn)
1970 if (!(to_rename && DF_REF_REG (def) == to_rename))
1971 bitmap_set_bit (bba_sets, DF_REF_REGNO (def));
1972 }
1973
1974 rtx_insn *b_insn;
1975
1976 FOR_BB_INSNS (bb_b, b_insn)
1977 {
1978 if (!active_insn_p (b_insn))
1979 continue;
1980
1981 rtx sset_b = single_set (b_insn);
1982
1983 if (!sset_b)
1984 {
1985 BITMAP_FREE (bba_sets);
1986 return false;
1987 }
1988
1989 /* Make sure this is a REG and not some instance
1990 of ZERO_EXTRACT or SUBREG or other dangerous stuff.
1991 If we have a memory destination then we have a pair of simple
1992 basic blocks performing an operation of the form [addr] = c ? a : b.
1993 bb_valid_for_noce_process_p will have ensured that these are
1994 the only stores present. In that case [addr] should be the location
1995 to be renamed. Assert that the callers set this up properly. */
1996 if (MEM_P (SET_DEST (sset_b)))
1997 gcc_assert (rtx_equal_p (SET_DEST (sset_b), to_rename));
1998 else if (!REG_P (SET_DEST (sset_b)))
1999 {
2000 BITMAP_FREE (bba_sets);
2001 return false;
2002 }
2003
2004 /* If the insn uses a reg set in BB_A return false. */
2005 FOR_EACH_INSN_USE (use, b_insn)
2006 {
2007 if (bitmap_bit_p (bba_sets, DF_REF_REGNO (use)))
2008 {
2009 BITMAP_FREE (bba_sets);
2010 return false;
2011 }
2012 }
2013
2014 }
2015
2016 BITMAP_FREE (bba_sets);
2017 return true;
2018 }
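
/* Editorial illustration of the check above (hypothetical pseudo
   registers):

     bb_a:  r1 = r2 + 1;
     bb_b:  r3 = r1 * 2;

   Here bb_b uses r1, which bb_a sets, so bbs_ok_for_cmove_arith
   (bb_a, bb_b, ...) returns false: the two blocks could not both be
   emitted unconditionally with bb_a's copies placed first.  */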
2019
2020 /* Emit copies of all the active instructions in BB except the last.
2021 This is a helper for noce_try_cmove_arith. */
2022
2023 static void
2024 noce_emit_all_but_last (basic_block bb)
2025 {
2026 rtx_insn *last = last_active_insn (bb, FALSE);
2027 rtx_insn *insn;
2028 FOR_BB_INSNS (bb, insn)
2029 {
2030 if (insn != last && active_insn_p (insn))
2031 {
2032 rtx_insn *to_emit = as_a <rtx_insn *> (copy_rtx (insn));
2033
2034 emit_insn (PATTERN (to_emit));
2035 }
2036 }
2037 }
2038
2039 /* Helper for noce_try_cmove_arith. Emit the pattern TO_EMIT and return
2040 the resulting insn or NULL if it's not a valid insn. */
2041
2042 static rtx_insn *
2043 noce_emit_insn (rtx to_emit)
2044 {
2045 gcc_assert (to_emit);
2046 rtx_insn *insn = emit_insn (to_emit);
2047
2048 if (recog_memoized (insn) < 0)
2049 return NULL;
2050
2051 return insn;
2052 }
2053
2054 /* Helper for noce_try_cmove_arith. Emit a copy of the insns up to
2055 and including the penultimate one in BB if it is not simple
2056 (as indicated by SIMPLE). Then emit LAST_INSN as the last
2057 insn in the block. The reason for that is that LAST_INSN may
2058 have been modified by the preparation in noce_try_cmove_arith. */
2059
2060 static bool
2061 noce_emit_bb (rtx last_insn, basic_block bb, bool simple)
2062 {
2063 if (bb && !simple)
2064 noce_emit_all_but_last (bb);
2065
2066 if (last_insn && !noce_emit_insn (last_insn))
2067 return false;
2068
2069 return true;
2070 }
2071
2072 /* Try more complex cases involving conditional_move. */
2073
2074 static int
2075 noce_try_cmove_arith (struct noce_if_info *if_info)
2076 {
2077 rtx a = if_info->a;
2078 rtx b = if_info->b;
2079 rtx x = if_info->x;
2080 rtx orig_a, orig_b;
2081 rtx_insn *insn_a, *insn_b;
2082 bool a_simple = if_info->then_simple;
2083 bool b_simple = if_info->else_simple;
2084 basic_block then_bb = if_info->then_bb;
2085 basic_block else_bb = if_info->else_bb;
2086 rtx target;
2087 int is_mem = 0;
2088 enum rtx_code code;
2089 rtx_insn *ifcvt_seq;
2090
2091 /* A conditional move from two memory sources is equivalent to a
2092 conditional on their addresses followed by a load. Don't do this
2093 early because it'll screw up alias analysis. Note that we've
2094 already checked for no side effects. */
2095 if (cse_not_expected
2096 && MEM_P (a) && MEM_P (b)
2097 && MEM_ADDR_SPACE (a) == MEM_ADDR_SPACE (b))
2098 {
2099 machine_mode address_mode = get_address_mode (a);
2100
2101 a = XEXP (a, 0);
2102 b = XEXP (b, 0);
2103 x = gen_reg_rtx (address_mode);
2104 is_mem = 1;
2105 }
2106
2107 /* ??? We could handle this if we knew that a load from A or B could
2108 not trap or fault. This is also true if we've already loaded
2109 from the address along the path from ENTRY. */
2110 else if (may_trap_or_fault_p (a) || may_trap_or_fault_p (b))
2111 return FALSE;
2112
2113 /* if (test) x = a + b; else x = c - d;
2114 => y = a + b;
2115 x = c - d;
2116 if (test)
2117 x = y;
2118 */
2119
2120 code = GET_CODE (if_info->cond);
2121 insn_a = if_info->insn_a;
2122 insn_b = if_info->insn_b;
2123
2124 machine_mode x_mode = GET_MODE (x);
2125
2126 if (!can_conditionally_move_p (x_mode))
2127 return FALSE;
2128
2129 /* Possibly rearrange operands to make things come out more naturally. */
2130 if (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)
2131 {
2132 int reversep = 0;
2133 if (rtx_equal_p (b, x))
2134 reversep = 1;
2135 else if (general_operand (b, GET_MODE (b)))
2136 reversep = 1;
2137
2138 if (reversep)
2139 {
2140 code = reversed_comparison_code (if_info->cond, if_info->jump);
2141 std::swap (a, b);
2142 std::swap (insn_a, insn_b);
2143 std::swap (a_simple, b_simple);
2144 std::swap (then_bb, else_bb);
2145 }
2146 }
2147
2148 if (then_bb && else_bb
2149 && (!bbs_ok_for_cmove_arith (then_bb, else_bb, if_info->orig_x)
2150 || !bbs_ok_for_cmove_arith (else_bb, then_bb, if_info->orig_x)))
2151 return FALSE;
2152
2153 start_sequence ();
2154
2155 /* If one of the blocks is empty then the corresponding B or A value
2156 came from the test block. The non-empty complex block that we will
2157 emit might clobber the register used by B or A, so move it to a pseudo
2158 first. */
2159
2160 rtx tmp_a = NULL_RTX;
2161 rtx tmp_b = NULL_RTX;
2162
2163 if (b_simple || !else_bb)
2164 tmp_b = gen_reg_rtx (x_mode);
2165
2166 if (a_simple || !then_bb)
2167 tmp_a = gen_reg_rtx (x_mode);
2168
2169 orig_a = a;
2170 orig_b = b;
2171
2172 rtx emit_a = NULL_RTX;
2173 rtx emit_b = NULL_RTX;
2174 rtx_insn *tmp_insn = NULL;
2175 bool modified_in_a = false;
2176 bool modified_in_b = false;
2177 /* If either operand is complex, load it into a register first.
2178 The best way to do this is to copy the original insn. In this
2179 way we preserve any clobbers etc. that the insn may have had.
2180 This is of course not possible in the IS_MEM case. */
2181
2182 if (! general_operand (a, GET_MODE (a)) || tmp_a)
2183 {
2184
2185 if (is_mem)
2186 {
2187 rtx reg = gen_reg_rtx (GET_MODE (a));
2188 emit_a = gen_rtx_SET (reg, a);
2189 }
2190 else
2191 {
2192 if (insn_a)
2193 {
2194 a = tmp_a ? tmp_a : gen_reg_rtx (GET_MODE (a));
2195
2196 rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
2197 rtx set = single_set (copy_of_a);
2198 SET_DEST (set) = a;
2199
2200 emit_a = PATTERN (copy_of_a);
2201 }
2202 else
2203 {
2204 rtx tmp_reg = tmp_a ? tmp_a : gen_reg_rtx (GET_MODE (a));
2205 emit_a = gen_rtx_SET (tmp_reg, a);
2206 a = tmp_reg;
2207 }
2208 }
2209 }
2210
2211 if (! general_operand (b, GET_MODE (b)) || tmp_b)
2212 {
2213 if (is_mem)
2214 {
2215 rtx reg = gen_reg_rtx (GET_MODE (b));
2216 emit_b = gen_rtx_SET (reg, b);
2217 }
2218 else
2219 {
2220 if (insn_b)
2221 {
2222 b = tmp_b ? tmp_b : gen_reg_rtx (GET_MODE (b));
2223 rtx_insn *copy_of_b = as_a <rtx_insn *> (copy_rtx (insn_b));
2224 rtx set = single_set (copy_of_b);
2225
2226 SET_DEST (set) = b;
2227 emit_b = PATTERN (copy_of_b);
2228 }
2229 else
2230 {
2231 rtx tmp_reg = tmp_b ? tmp_b : gen_reg_rtx (GET_MODE (b));
2232 emit_b = gen_rtx_SET (tmp_reg, b);
2233 b = tmp_reg;
2234 }
2235 }
2236 }
2237
2238 modified_in_a = emit_a != NULL_RTX && modified_in_p (orig_b, emit_a);
2239 if (tmp_b && then_bb)
2240 {
2241 FOR_BB_INSNS (then_bb, tmp_insn)
2242 /* Don't check inside insn_a. We will have changed it to emit_a
2243 with a destination that doesn't conflict. */
2244 if (!(insn_a && tmp_insn == insn_a)
2245 && modified_in_p (orig_b, tmp_insn))
2246 {
2247 modified_in_a = true;
2248 break;
2249 }
2250
2251 }
2252
2253 modified_in_b = emit_b != NULL_RTX && modified_in_p (orig_a, emit_b);
2254 if (tmp_a && else_bb)
2255 {
2256 FOR_BB_INSNS (else_bb, tmp_insn)
2257 /* Don't check inside insn_b. We will have changed it to emit_b
2258 with a destination that doesn't conflict. */
2259 if (!(insn_b && tmp_insn == insn_b)
2260 && modified_in_p (orig_a, tmp_insn))
2261 {
2262 modified_in_b = true;
2263 break;
2264 }
2265 }
2266
2267 /* If insn to set up A clobbers any registers B depends on, try to
2268 swap insn that sets up A with the one that sets up B. If even
2269 that doesn't help, punt. */
2270 if (modified_in_a && !modified_in_b)
2271 {
2272 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2273 goto end_seq_and_fail;
2274
2275 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2276 goto end_seq_and_fail;
2277 }
2278 else if (!modified_in_a)
2279 {
2280 if (!noce_emit_bb (emit_a, then_bb, a_simple))
2281 goto end_seq_and_fail;
2282
2283 if (!noce_emit_bb (emit_b, else_bb, b_simple))
2284 goto end_seq_and_fail;
2285 }
2286 else
2287 goto end_seq_and_fail;
2288
2289 target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0),
2290 XEXP (if_info->cond, 1), a, b);
2291
2292 if (! target)
2293 goto end_seq_and_fail;
2294
2295 /* If we're handling the memory case from above, emit the load now. */
2296 if (is_mem)
2297 {
2298 rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);
2299
2300 /* Copy over flags as appropriate. */
2301 if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
2302 MEM_VOLATILE_P (mem) = 1;
2303 if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
2304 set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
2305 set_mem_align (mem,
2306 MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));
2307
2308 gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
2309 set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));
2310
2311 noce_emit_move_insn (if_info->x, mem);
2312 }
2313 else if (target != x)
2314 noce_emit_move_insn (x, target);
2315
2316 ifcvt_seq = end_ifcvt_sequence (if_info);
2317 if (!ifcvt_seq || !noce_conversion_profitable_p (ifcvt_seq, if_info))
2318 return FALSE;
2319
2320 emit_insn_before_setloc (ifcvt_seq, if_info->jump,
2321 INSN_LOCATION (if_info->insn_a));
2322 if_info->transform_name = "noce_try_cmove_arith";
2323 return TRUE;
2324
2325 end_seq_and_fail:
2326 end_sequence ();
2327 return FALSE;
2328 }
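
/* Editorial sketch, not part of the pass: the transformation that
   noce_try_cmove_arith performs, written at the C level.  Both arms
   are evaluated unconditionally; only the final select remains
   conditional and becomes a conditional move.  */

static int
cmove_arith_example (int test, int a, int b, int c, int d)
{
  int y = a + b;  /* "then" arm, computed unconditionally.  */
  int x = c - d;  /* "else" arm, computed unconditionally.  */
  if (test)       /* Emitted as a conditional move, not a jump.  */
    x = y;
  return x;
}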
2329
2330 /* For most cases, the simplified condition we found is the best
2331 choice, but this is not the case for the min/max/abs transforms.
2332 For these we wish to know that it is A or B in the condition. */
2333
2334 static rtx
2335 noce_get_alt_condition (struct noce_if_info *if_info, rtx target,
2336 rtx_insn **earliest)
2337 {
2338 rtx cond, set;
2339 rtx_insn *insn;
2340 int reverse;
2341
2342 /* If target is already mentioned in the known condition, return it. */
2343 if (reg_mentioned_p (target, if_info->cond))
2344 {
2345 *earliest = if_info->cond_earliest;
2346 return if_info->cond;
2347 }
2348
2349 set = pc_set (if_info->jump);
2350 cond = XEXP (SET_SRC (set), 0);
2351 reverse
2352 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2353 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (if_info->jump);
2354 if (if_info->then_else_reversed)
2355 reverse = !reverse;
2356
2357 /* If we're looking for a constant, try to make the conditional
2358 have that constant in it. There are two reasons why it may
2359 not have the constant we want:
2360
2361 1. GCC may have needed to put the constant in a register, because
2362 the target can't compare directly against that constant. For
2363 this case, we look for a SET immediately before the comparison
2364 that puts a constant in that register.
2365
2366 2. GCC may have canonicalized the conditional, for example
2367 replacing "if x < 4" with "if x <= 3". We can undo that (or
2368 make equivalent types of changes) to get the constants we need
2369 if they're off by one in the right direction. */
2370
2371 if (CONST_INT_P (target))
2372 {
2373 enum rtx_code code = GET_CODE (if_info->cond);
2374 rtx op_a = XEXP (if_info->cond, 0);
2375 rtx op_b = XEXP (if_info->cond, 1);
2376 rtx_insn *prev_insn;
2377
2378 /* First, look to see if we put a constant in a register. */
2379 prev_insn = prev_nonnote_insn (if_info->cond_earliest);
2380 if (prev_insn
2381 && BLOCK_FOR_INSN (prev_insn)
2382 == BLOCK_FOR_INSN (if_info->cond_earliest)
2383 && INSN_P (prev_insn)
2384 && GET_CODE (PATTERN (prev_insn)) == SET)
2385 {
2386 rtx src = find_reg_equal_equiv_note (prev_insn);
2387 if (!src)
2388 src = SET_SRC (PATTERN (prev_insn));
2389 if (CONST_INT_P (src))
2390 {
2391 if (rtx_equal_p (op_a, SET_DEST (PATTERN (prev_insn))))
2392 op_a = src;
2393 else if (rtx_equal_p (op_b, SET_DEST (PATTERN (prev_insn))))
2394 op_b = src;
2395
2396 if (CONST_INT_P (op_a))
2397 {
2398 std::swap (op_a, op_b);
2399 code = swap_condition (code);
2400 }
2401 }
2402 }
2403
2404 /* Now, look to see if we can get the right constant by
2405 adjusting the conditional. */
2406 if (CONST_INT_P (op_b))
2407 {
2408 HOST_WIDE_INT desired_val = INTVAL (target);
2409 HOST_WIDE_INT actual_val = INTVAL (op_b);
2410
2411 switch (code)
2412 {
2413 case LT:
2414 if (desired_val != HOST_WIDE_INT_MAX
2415 && actual_val == desired_val + 1)
2416 {
2417 code = LE;
2418 op_b = GEN_INT (desired_val);
2419 }
2420 break;
2421 case LE:
2422 if (desired_val != HOST_WIDE_INT_MIN
2423 && actual_val == desired_val - 1)
2424 {
2425 code = LT;
2426 op_b = GEN_INT (desired_val);
2427 }
2428 break;
2429 case GT:
2430 if (desired_val != HOST_WIDE_INT_MIN
2431 && actual_val == desired_val - 1)
2432 {
2433 code = GE;
2434 op_b = GEN_INT (desired_val);
2435 }
2436 break;
2437 case GE:
2438 if (desired_val != HOST_WIDE_INT_MAX
2439 && actual_val == desired_val + 1)
2440 {
2441 code = GT;
2442 op_b = GEN_INT (desired_val);
2443 }
2444 break;
2445 default:
2446 break;
2447 }
2448 }
2449
2450 /* If we made any changes, generate a new conditional that is
2451 equivalent to what we started with, but has the right
2452 constants in it. */
2453 if (code != GET_CODE (if_info->cond)
2454 || op_a != XEXP (if_info->cond, 0)
2455 || op_b != XEXP (if_info->cond, 1))
2456 {
2457 cond = gen_rtx_fmt_ee (code, GET_MODE (cond), op_a, op_b);
2458 *earliest = if_info->cond_earliest;
2459 return cond;
2460 }
2461 }
2462
2463 cond = canonicalize_condition (if_info->jump, cond, reverse,
2464 earliest, target, have_cbranchcc4, true);
2465 if (! cond || ! reg_mentioned_p (target, cond))
2466 return NULL;
2467
2468 /* We almost certainly searched back to a different place.
2469 Need to re-verify correct lifetimes. */
2470
2471 /* X may not be mentioned in the range (cond_earliest, jump]. */
2472 for (insn = if_info->jump; insn != *earliest; insn = PREV_INSN (insn))
2473 if (INSN_P (insn) && reg_overlap_mentioned_p (if_info->x, PATTERN (insn)))
2474 return NULL;
2475
2476 /* A and B may not be modified in the range [cond_earliest, jump). */
2477 for (insn = *earliest; insn != if_info->jump; insn = NEXT_INSN (insn))
2478 if (INSN_P (insn)
2479 && (modified_in_p (if_info->a, insn)
2480 || modified_in_p (if_info->b, insn)))
2481 return NULL;
2482
2483 return cond;
2484 }
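
/* Editorial sketch of case 2 above, with the hypothetical constant 4:
   for integers, "x <= 3" and "x < 4" are the same test, so when the
   canonicalized condition carries 3 but the caller wants to see 4,
   adjusting LE to LT (or LT to LE, GT/GE likewise) by one recovers
   the desired constant.  */

static int
adjusted_condition_example (int x)
{
  return (x <= 3) == (x < 4);  /* Always 1, for any int x.  */
}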
2485
2486 /* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */
2487
2488 static int
2489 noce_try_minmax (struct noce_if_info *if_info)
2490 {
2491 rtx cond, target;
2492 rtx_insn *earliest, *seq;
2493 enum rtx_code code, op;
2494 int unsignedp;
2495
2496 if (!noce_simple_bbs (if_info))
2497 return FALSE;
2498
2499 /* ??? Reject modes with NaNs or signed zeros since we don't know how
2500 they will be resolved with an SMIN/SMAX. It wouldn't be too hard
2501 to get the target to tell us... */
2502 if (HONOR_SIGNED_ZEROS (if_info->x)
2503 || HONOR_NANS (if_info->x))
2504 return FALSE;
2505
2506 cond = noce_get_alt_condition (if_info, if_info->a, &earliest);
2507 if (!cond)
2508 return FALSE;
2509
2510 /* Verify the condition is of the form we expect, and canonicalize
2511 the comparison code. */
2512 code = GET_CODE (cond);
2513 if (rtx_equal_p (XEXP (cond, 0), if_info->a))
2514 {
2515 if (! rtx_equal_p (XEXP (cond, 1), if_info->b))
2516 return FALSE;
2517 }
2518 else if (rtx_equal_p (XEXP (cond, 1), if_info->a))
2519 {
2520 if (! rtx_equal_p (XEXP (cond, 0), if_info->b))
2521 return FALSE;
2522 code = swap_condition (code);
2523 }
2524 else
2525 return FALSE;
2526
2527 /* Determine what sort of operation this is. Note that the code is for
2528 a taken branch, so the code->operation mapping appears backwards. */
2529 switch (code)
2530 {
2531 case LT:
2532 case LE:
2533 case UNLT:
2534 case UNLE:
2535 op = SMAX;
2536 unsignedp = 0;
2537 break;
2538 case GT:
2539 case GE:
2540 case UNGT:
2541 case UNGE:
2542 op = SMIN;
2543 unsignedp = 0;
2544 break;
2545 case LTU:
2546 case LEU:
2547 op = UMAX;
2548 unsignedp = 1;
2549 break;
2550 case GTU:
2551 case GEU:
2552 op = UMIN;
2553 unsignedp = 1;
2554 break;
2555 default:
2556 return FALSE;
2557 }
2558
2559 start_sequence ();
2560
2561 target = expand_simple_binop (GET_MODE (if_info->x), op,
2562 if_info->a, if_info->b,
2563 if_info->x, unsignedp, OPTAB_WIDEN);
2564 if (! target)
2565 {
2566 end_sequence ();
2567 return FALSE;
2568 }
2569 if (target != if_info->x)
2570 noce_emit_move_insn (if_info->x, target);
2571
2572 seq = end_ifcvt_sequence (if_info);
2573 if (!seq)
2574 return FALSE;
2575
2576 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2577 if_info->cond = cond;
2578 if_info->cond_earliest = earliest;
2579 if_info->transform_name = "noce_try_minmax";
2580
2581 return TRUE;
2582 }
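
/* Editorial note on the code->operation mapping in noce_try_minmax:
   the condition in IF_INFO is the branch-taken one, under which X
   receives the B ("else") value.  For the source form

     if (a < b) x = a; else x = b;    =>   x = min (a, b)

   the branch is taken when a >= b, so the condition seen by the
   switch above is GE, which is why GT/GE select SMIN and LT/LE
   select SMAX: the mapping is reversed relative to the source-level
   comparison.  */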
2583
2584 /* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);",
2585 "if (a < 0) x = ~a; else x = a;" to "x = one_cmpl_abs(a);",
2586 etc. */
2587
2588 static int
2589 noce_try_abs (struct noce_if_info *if_info)
2590 {
2591 rtx cond, target, a, b, c;
2592 rtx_insn *earliest, *seq;
2593 int negate;
2594 bool one_cmpl = false;
2595
2596 if (!noce_simple_bbs (if_info))
2597 return FALSE;
2598
2599 /* Reject modes with signed zeros. */
2600 if (HONOR_SIGNED_ZEROS (if_info->x))
2601 return FALSE;
2602
2603 /* Recognize A and B as constituting an ABS or NABS. The canonical
2604 form is a branch around the negation, taken when the object is the
2605 first operand of a comparison against 0 that evaluates to true. */
2606 a = if_info->a;
2607 b = if_info->b;
2608 if (GET_CODE (a) == NEG && rtx_equal_p (XEXP (a, 0), b))
2609 negate = 0;
2610 else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a))
2611 {
2612 std::swap (a, b);
2613 negate = 1;
2614 }
2615 else if (GET_CODE (a) == NOT && rtx_equal_p (XEXP (a, 0), b))
2616 {
2617 negate = 0;
2618 one_cmpl = true;
2619 }
2620 else if (GET_CODE (b) == NOT && rtx_equal_p (XEXP (b, 0), a))
2621 {
2622 std::swap (a, b);
2623 negate = 1;
2624 one_cmpl = true;
2625 }
2626 else
2627 return FALSE;
2628
2629 cond = noce_get_alt_condition (if_info, b, &earliest);
2630 if (!cond)
2631 return FALSE;
2632
2633 /* Verify the condition is of the form we expect. */
2634 if (rtx_equal_p (XEXP (cond, 0), b))
2635 c = XEXP (cond, 1);
2636 else if (rtx_equal_p (XEXP (cond, 1), b))
2637 {
2638 c = XEXP (cond, 0);
2639 negate = !negate;
2640 }
2641 else
2642 return FALSE;
2643
2644 /* Verify that C is zero. Search one step backward for a
2645 REG_EQUAL note or a simple source if necessary. */
2646 if (REG_P (c))
2647 {
2648 rtx set;
2649 rtx_insn *insn = prev_nonnote_insn (earliest);
2650 if (insn
2651 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (earliest)
2652 && (set = single_set (insn))
2653 && rtx_equal_p (SET_DEST (set), c))
2654 {
2655 rtx note = find_reg_equal_equiv_note (insn);
2656 if (note)
2657 c = XEXP (note, 0);
2658 else
2659 c = SET_SRC (set);
2660 }
2661 else
2662 return FALSE;
2663 }
2664 if (MEM_P (c)
2665 && GET_CODE (XEXP (c, 0)) == SYMBOL_REF
2666 && CONSTANT_POOL_ADDRESS_P (XEXP (c, 0)))
2667 c = get_pool_constant (XEXP (c, 0));
2668
2669 /* Work around funny ideas get_condition has wrt canonicalization.
2670 Note that these rtx constants are known to be CONST_INT, and
2671 therefore imply integer comparisons.
2672 The one_cmpl case is more complicated, as we want to handle
2673 only x < 0 ? ~x : x or x >= 0 ? x : ~x to one_cmpl_abs (x)
2674 and x < 0 ? x : ~x or x >= 0 ? ~x : x to ~one_cmpl_abs (x),
2675 but not other cases (x > -1 is equivalent to x >= 0). */
2676 if (c == constm1_rtx && GET_CODE (cond) == GT)
2677 ;
2678 else if (c == const1_rtx && GET_CODE (cond) == LT)
2679 {
2680 if (one_cmpl)
2681 return FALSE;
2682 }
2683 else if (c == CONST0_RTX (GET_MODE (b)))
2684 {
2685 if (one_cmpl
2686 && GET_CODE (cond) != GE
2687 && GET_CODE (cond) != LT)
2688 return FALSE;
2689 }
2690 else
2691 return FALSE;
2692
2693 /* Determine what sort of operation this is. */
2694 switch (GET_CODE (cond))
2695 {
2696 case LT:
2697 case LE:
2698 case UNLT:
2699 case UNLE:
2700 negate = !negate;
2701 break;
2702 case GT:
2703 case GE:
2704 case UNGT:
2705 case UNGE:
2706 break;
2707 default:
2708 return FALSE;
2709 }
2710
2711 start_sequence ();
2712 if (one_cmpl)
2713 target = expand_one_cmpl_abs_nojump (GET_MODE (if_info->x), b,
2714 if_info->x);
2715 else
2716 target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1);
2717
2718 /* ??? It's a quandary whether cmove would be better here, especially
2719 for integers. Perhaps combine will clean things up. */
2720 if (target && negate)
2721 {
2722 if (one_cmpl)
2723 target = expand_simple_unop (GET_MODE (target), NOT, target,
2724 if_info->x, 0);
2725 else
2726 target = expand_simple_unop (GET_MODE (target), NEG, target,
2727 if_info->x, 0);
2728 }
2729
2730 if (! target)
2731 {
2732 end_sequence ();
2733 return FALSE;
2734 }
2735
2736 if (target != if_info->x)
2737 noce_emit_move_insn (if_info->x, target);
2738
2739 seq = end_ifcvt_sequence (if_info);
2740 if (!seq)
2741 return FALSE;
2742
2743 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2744 if_info->cond = cond;
2745 if_info->cond_earliest = earliest;
2746 if_info->transform_name = "noce_try_abs";
2747
2748 return TRUE;
2749 }
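
/* Editorial sketch, not part of the pass: the one_cmpl_abs form
   recognized above, branch-free in plain C.  Assumes a 32-bit int
   and an arithmetic right shift of negative values, which ISO C
   leaves implementation-defined.  */

static int
one_cmpl_abs_example (int x)
{
  int mask = x >> 31;  /* 0 for x >= 0, -1 for x < 0.  */
  return x ^ mask;     /* x < 0 ? ~x : x.  */
}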
2750
2751 /* Convert "if (m < 0) x = b; else x = 0;" to "x = (m >> C) & b;". */
2752
2753 static int
2754 noce_try_sign_mask (struct noce_if_info *if_info)
2755 {
2756 rtx cond, t, m, c;
2757 rtx_insn *seq;
2758 machine_mode mode;
2759 enum rtx_code code;
2760 bool t_unconditional;
2761
2762 if (!noce_simple_bbs (if_info))
2763 return FALSE;
2764
2765 cond = if_info->cond;
2766 code = GET_CODE (cond);
2767 m = XEXP (cond, 0);
2768 c = XEXP (cond, 1);
2769
2770 t = NULL_RTX;
2771 if (if_info->a == const0_rtx)
2772 {
2773 if ((code == LT && c == const0_rtx)
2774 || (code == LE && c == constm1_rtx))
2775 t = if_info->b;
2776 }
2777 else if (if_info->b == const0_rtx)
2778 {
2779 if ((code == GE && c == const0_rtx)
2780 || (code == GT && c == constm1_rtx))
2781 t = if_info->a;
2782 }
2783
2784 if (! t || side_effects_p (t))
2785 return FALSE;
2786
2787 /* We currently don't handle different modes. */
2788 mode = GET_MODE (t);
2789 if (GET_MODE (m) != mode)
2790 return FALSE;
2791
2792 /* This is only profitable if T is unconditionally executed/evaluated in the
2793 original insn sequence or T is cheap. The former happens if B is the
2794 non-zero (T) value and if INSN_B was taken from TEST_BB, or there was no
2795 INSN_B, which can happen e.g. for conditional stores to memory. For the
2796 cost computation use the block TEST_BB where the evaluation will end up
2797 after the transformation. */
2798 t_unconditional =
2799 (t == if_info->b
2800 && (if_info->insn_b == NULL_RTX
2801 || BLOCK_FOR_INSN (if_info->insn_b) == if_info->test_bb));
2802 if (!(t_unconditional
2803 || (set_src_cost (t, mode, if_info->speed_p)
2804 < COSTS_N_INSNS (2))))
2805 return FALSE;
2806
2807 start_sequence ();
2808 /* Use emit_store_flag to generate "m < 0 ? -1 : 0" instead of expanding
2809 "(signed) m >> 31" directly. This benefits targets with specialized
2810 insns to obtain the signmask, but still uses ashr_optab otherwise. */
2811 m = emit_store_flag (gen_reg_rtx (mode), LT, m, const0_rtx, mode, 0, -1);
2812 t = m ? expand_binop (mode, and_optab, m, t, NULL_RTX, 0, OPTAB_DIRECT)
2813 : NULL_RTX;
2814
2815 if (!t)
2816 {
2817 end_sequence ();
2818 return FALSE;
2819 }
2820
2821 noce_emit_move_insn (if_info->x, t);
2822
2823 seq = end_ifcvt_sequence (if_info);
2824 if (!seq)
2825 return FALSE;
2826
2827 emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATION (if_info->insn_a));
2828 if_info->transform_name = "noce_try_sign_mask";
2829
2830 return TRUE;
2831 }
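
/* Editorial sketch of the sign-mask transform above, with the same
   caveats as the previous example about right-shifting negative
   values (31 assumes a 32-bit int).  */

static int
sign_mask_example (int m, int b)
{
  return (m >> 31) & b;  /* m < 0 ? b : 0, with no branch.  */
}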
2832
2833
2834 /* Optimize away "if (x & C) x |= C" and similar bit manipulation
2835 transformations. */
2836
2837 static int
2838 noce_try_bitop (struct noce_if_info *if_info)
2839 {
2840 rtx cond, x, a, result;
2841 rtx_insn *seq;
2842 machine_mode mode;
2843 enum rtx_code code;
2844 int bitnum;
2845
2846 x = if_info->x;
2847 cond = if_info->cond;
2848 code = GET_CODE (cond);
2849
2850 if (!noce_simple_bbs (if_info))
2851 return FALSE;
2852
2853 /* Check that there is no else case, i.e. B is X itself. */
2854 if (! rtx_equal_p (x, if_info->b))
2855 return FALSE;
2856
2857 /* Check for a suitable condition. */
2858 if (code != NE && code != EQ)
2859 return FALSE;
2860 if (XEXP (cond, 1) != const0_rtx)
2861 return FALSE;
2862 cond = XEXP (cond, 0);
2863
2864 /* ??? We could also handle AND here. */
2865 if (GET_CODE (cond) == ZERO_EXTRACT)
2866 {
2867 if (XEXP (cond, 1) != const1_rtx
2868 || !CONST_INT_P (XEXP (cond, 2))
2869 || ! rtx_equal_p (x, XEXP (cond, 0)))
2870 return FALSE;
2871 bitnum = INTVAL (XEXP (cond, 2));
2872 mode = GET_MODE (x);
2873 if (BITS_BIG_ENDIAN)
2874 bitnum = GET_MODE_BITSIZE (mode) - 1 - bitnum;
2875 if (bitnum < 0 || bitnum >= HOST_BITS_PER_WIDE_INT)
2876 return FALSE;
2877 }
2878 else
2879 return FALSE;
2880
2881 a = if_info->a;
2882 if (GET_CODE (a) == IOR || GET_CODE (a) == XOR)
2883 {
2884 /* Check for "if (X & C) x = x op C". */
2885 if (! rtx_equal_p (x, XEXP (a, 0))
2886 || !CONST_INT_P (XEXP (a, 1))
2887 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2888 != HOST_WIDE_INT_1U << bitnum)
2889 return FALSE;
2890
2891 /* if ((x & C) == 0) x |= C; is transformed to x |= C. */
2892 /* if ((x & C) != 0) x |= C; is transformed to nothing. */
2893 if (GET_CODE (a) == IOR)
2894 result = (code == NE) ? a : NULL_RTX;
2895 else if (code == NE)
2896 {
2897 /* if ((x & C) == 0) x ^= C; is transformed to x |= C. */
2898 result = gen_int_mode (HOST_WIDE_INT_1 << bitnum, mode);
2899 result = simplify_gen_binary (IOR, mode, x, result);
2900 }
2901 else
2902 {
2903 /* if ((x & C) != 0) x ^= C; is transformed to x &= ~C. */
2904 result = gen_int_mode (~(HOST_WIDE_INT_1 << bitnum), mode);
2905 result = simplify_gen_binary (AND, mode, x, result);
2906 }
2907 }
2908 else if (GET_CODE (a) == AND)
2909 {
2910 /* Check for "if (X & C) x &= ~C". */
2911 if (! rtx_equal_p (x, XEXP (a, 0))
2912 || !CONST_INT_P (XEXP (a, 1))
2913 || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode))
2914 != (~(HOST_WIDE_INT_1 << bitnum) & GET_MODE_MASK (mode)))
2915 return FALSE;
2916
2917 /* if ((x & C) == 0) x &= ~C; is transformed to nothing. */
2918 /* if ((x & C) != 0) x &= ~C; is transformed to x &= ~C. */
2919 result = (code == EQ) ? a : NULL_RTX;
2920 }
2921 else
2922 return FALSE;
2923
2924 if (result)
2925 {
2926 start_sequence ();
2927 noce_emit_move_insn (x, result);
2928 seq = end_ifcvt_sequence (if_info);
2929 if (!seq)
2930 return FALSE;
2931
2932 emit_insn_before_setloc (seq, if_info->jump,
2933 INSN_LOCATION (if_info->insn_a));
2934 }
2935 if_info->transform_name = "noce_try_bitop";
2936 return TRUE;
2937 }
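
/* Editorial sketch of one of the bit-manipulation rewrites above,
   using the arbitrary single-bit constant C = 4.  */

static unsigned int
bitop_example (unsigned int x)
{
  /* "if ((x & 4) == 0) x ^= 4;" always leaves bit 2 set, so it is
     rewritten to the unconditional "x |= 4".  */
  return x | 4u;
}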
2938
2939
2940 /* Similar to get_condition, only the resulting condition must be
2941 valid at JUMP, instead of at EARLIEST.
2942
2943 If THEN_ELSE_REVERSED is true, the fallthrough does not go to the
2944 THEN block of the caller, and we have to reverse the condition. */
2945
2946 static rtx
2947 noce_get_condition (rtx_insn *jump, rtx_insn **earliest, bool then_else_reversed)
2948 {
2949 rtx cond, set, tmp;
2950 bool reverse;
2951
2952 if (! any_condjump_p (jump))
2953 return NULL_RTX;
2954
2955 set = pc_set (jump);
2956
2957 /* If this branches to JUMP_LABEL when the condition is false,
2958 reverse the condition. */
2959 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
2960 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump));
2961
2962 /* We may have to reverse because the caller's if block is not canonical,
2963 i.e. the THEN block isn't the fallthrough block for the TEST block
2964 (see find_if_header). */
2965 if (then_else_reversed)
2966 reverse = !reverse;
2967
2968 /* If the condition variable is a register and is MODE_INT, accept it. */
2969
2970 cond = XEXP (SET_SRC (set), 0);
2971 tmp = XEXP (cond, 0);
2972 if (REG_P (tmp) && GET_MODE_CLASS (GET_MODE (tmp)) == MODE_INT
2973 && (GET_MODE (tmp) != BImode
2974 || !targetm.small_register_classes_for_mode_p (BImode)))
2975 {
2976 *earliest = jump;
2977
2978 if (reverse)
2979 cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
2980 GET_MODE (cond), tmp, XEXP (cond, 1));
2981 return cond;
2982 }
2983
2984 /* Otherwise, fall back on canonicalize_condition to do the dirty
2985 work of manipulating MODE_CC values and COMPARE rtx codes. */
2986 tmp = canonicalize_condition (jump, cond, reverse, earliest,
2987 NULL_RTX, have_cbranchcc4, true);
2988
2989 /* We don't handle side-effects in the condition, like handling
2990 REG_INC notes and making sure no duplicate conditions are emitted. */
2991 if (tmp != NULL_RTX && side_effects_p (tmp))
2992 return NULL_RTX;
2993
2994 return tmp;
2995 }
2996
2997 /* Return true if OP is ok for if-then-else processing. */
2998
2999 static int
3000 noce_operand_ok (const_rtx op)
3001 {
3002 if (side_effects_p (op))
3003 return FALSE;
3004
3005 /* We special-case memories, so handle any of them with
3006 no address side effects. */
3007 if (MEM_P (op))
3008 return ! side_effects_p (XEXP (op, 0));
3009
3010 return ! may_trap_p (op);
3011 }
3012
3013 /* Return true iff basic block TEST_BB is valid for noce if-conversion.
3014 The condition used in this if-conversion is in COND.
3015 In practice, check that TEST_BB ends with a single set
3016 x := a and all previous computations
3017 in TEST_BB don't produce any values that are live after TEST_BB.
3018 In other words, all the insns in TEST_BB are there only
3019 to compute a value for x. Add the rtx cost of the insns
3020 in TEST_BB to COST. Record whether TEST_BB is a single simple
3021 set instruction in SIMPLE_P. */
3022
3023 static bool
3024 bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
3025 unsigned int *cost, bool *simple_p)
3026 {
3027 if (!test_bb)
3028 return false;
3029
3030 rtx_insn *last_insn = last_active_insn (test_bb, FALSE);
3031 rtx last_set = NULL_RTX;
3032
3033 rtx cc = cc_in_cond (cond);
3034
3035 if (!insn_valid_noce_process_p (last_insn, cc))
3036 return false;
3037 last_set = single_set (last_insn);
3038
3039 rtx x = SET_DEST (last_set);
3040 rtx_insn *first_insn = first_active_insn (test_bb);
3041 rtx first_set = single_set (first_insn);
3042
3043 if (!first_set)
3044 return false;
3045
3046 /* We have a single simple set, that's okay. */
3047 bool speed_p = optimize_bb_for_speed_p (test_bb);
3048
3049 if (first_insn == last_insn)
3050 {
3051 *simple_p = noce_operand_ok (SET_DEST (first_set));
3052 *cost += insn_rtx_cost (first_set, speed_p);
3053 return *simple_p;
3054 }
3055
3056 rtx_insn *prev_last_insn = PREV_INSN (last_insn);
3057 gcc_assert (prev_last_insn);
3058
3059 /* For now, disallow setting x multiple times in test_bb. */
3060 if (REG_P (x) && reg_set_between_p (x, first_insn, prev_last_insn))
3061 return false;
3062
3063 bitmap test_bb_temps = BITMAP_ALLOC (&reg_obstack);
3064
3065 /* The regs that are live out of test_bb. */
3066 bitmap test_bb_live_out = df_get_live_out (test_bb);
3067
3068 int potential_cost = insn_rtx_cost (last_set, speed_p);
3069 rtx_insn *insn;
3070 FOR_BB_INSNS (test_bb, insn)
3071 {
3072 if (insn != last_insn)
3073 {
3074 if (!active_insn_p (insn))
3075 continue;
3076
3077 if (!insn_valid_noce_process_p (insn, cc))
3078 goto free_bitmap_and_fail;
3079
3080 rtx sset = single_set (insn);
3081 gcc_assert (sset);
3082
3083 if (contains_mem_rtx_p (SET_SRC (sset))
3084 || !REG_P (SET_DEST (sset))
3085 || reg_overlap_mentioned_p (SET_DEST (sset), cond))
3086 goto free_bitmap_and_fail;
3087
3088 potential_cost += insn_rtx_cost (sset, speed_p);
3089 bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset)));
3090 }
3091 }
3092
3093 /* If any of the intermediate results in test_bb are live after test_bb
3094 then fail. */
3095 if (bitmap_intersect_p (test_bb_live_out, test_bb_temps))
3096 goto free_bitmap_and_fail;
3097
3098 BITMAP_FREE (test_bb_temps);
3099 *cost += potential_cost;
3100 *simple_p = false;
3101 return true;
3102
3103 free_bitmap_and_fail:
3104 BITMAP_FREE (test_bb_temps);
3105 return false;
3106 }
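
/* Editorial illustration (hypothetical temporaries): a block of the
   form

     t1 = y + z;
     t2 = t1 * w;
     x  = t2 - 1;

   is accepted by bb_valid_for_noce_process_p as long as t1 and t2
   are dead after the block: every insn exists only to compute the
   final value of x, so the block can be treated as one (costed)
   computation of x.  */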
3107
3108 /* We have something like:
3109
3110 if (x > y)
3111 { i = a; j = b; k = c; }
3112
3113 Make it:
3114
3115 tmp_i = (x > y) ? a : i;
3116 tmp_j = (x > y) ? b : j;
3117 tmp_k = (x > y) ? c : k;
3118 i = tmp_i;
3119 j = tmp_j;
3120 k = tmp_k;
3121
3122 Subsequent passes are expected to clean up the extra moves.
3123
3124 Look for special cases such as writes to one register which are
3125 read back in another SET, as might occur in a swap idiom or
3126 similar.
3127
3128 These look like:
3129
3130 if (x > y)
3131 i = a;
3132 j = i;
3133
3134 Which we want to rewrite to:
3135
3136 tmp_i = (x > y) ? a : i;
3137 tmp_j = (x > y) ? tmp_i : j;
3138 i = tmp_i;
3139 j = tmp_j;
3140
3141 We can catch these when looking at (SET x y) by keeping a list of the
3142 registers we would have targeted before if-conversion and looking back
3143 through it for an overlap with Y. If we find one, we rewire the
3144 conditional set to use the temporary we introduced earlier.
3145
3146 IF_INFO contains the useful information about the block structure and
3147 jump instructions. */
3148
3149 static int
3150 noce_convert_multiple_sets (struct noce_if_info *if_info)
3151 {
3152 basic_block test_bb = if_info->test_bb;
3153 basic_block then_bb = if_info->then_bb;
3154 basic_block join_bb = if_info->join_bb;
3155 rtx_insn *jump = if_info->jump;
3156 rtx_insn *cond_earliest;
3157 rtx_insn *insn;
3158
3159 start_sequence ();
3160
3161 /* Decompose the condition attached to the jump. */
3162 rtx cond = noce_get_condition (jump, &cond_earliest, false);
3163 rtx x = XEXP (cond, 0);
3164 rtx y = XEXP (cond, 1);
3165 rtx_code cond_code = GET_CODE (cond);
3166
3167 /* The true targets for a conditional move. */
3168 auto_vec<rtx> targets;
3169 /* The temporaries introduced so that we do not have to consider
3170 register overlap. */
3171 auto_vec<rtx> temporaries;
3172 /* The insns we've emitted. */
3173 auto_vec<rtx_insn *> unmodified_insns;
3174 int count = 0;
3175
3176 FOR_BB_INSNS (then_bb, insn)
3177 {
3178 /* Skip over non-insns. */
3179 if (!active_insn_p (insn))
3180 continue;
3181
3182 rtx set = single_set (insn);
3183 gcc_checking_assert (set);
3184
3185 rtx target = SET_DEST (set);
3186 rtx temp = gen_reg_rtx (GET_MODE (target));
3187 rtx new_val = SET_SRC (set);
3188 rtx old_val = target;
3189
3190 /* If we were supposed to read from an earlier write in this block,
3191 we've changed the register allocation. Rewire the read. While
3192 we are looking, also try to catch a swap idiom. */
3193 for (int i = count - 1; i >= 0; --i)
3194 if (reg_overlap_mentioned_p (new_val, targets[i]))
3195 {
3196 /* Catch a "swap" style idiom. */
3197 if (find_reg_note (insn, REG_DEAD, new_val) != NULL_RTX)
3198 /* The write to targets[i] is only live until the read
3199 here. As the condition codes match, we can propagate
3200 the set to here. */
3201 new_val = SET_SRC (single_set (unmodified_insns[i]));
3202 else
3203 new_val = temporaries[i];
3204 break;
3205 }
3206
3207 /* If we had a non-canonical conditional jump (i.e. one where
3208 the fallthrough is to the "else" case) we need to reverse
3209 the conditional select. */
3210 if (if_info->then_else_reversed)
3211 std::swap (old_val, new_val);
3212
3213
3214 /* We allow simple lowpart register subreg SET sources in
3215 bb_ok_for_noce_convert_multiple_sets. Be careful when processing
3216 sequences like:
3217 (set (reg:SI r1) (reg:SI r2))
3218 (set (reg:HI r3) (subreg:HI (r1)))
3219 For the second insn new_val or old_val (r1 in this example) will be
3220 taken from the temporaries and have the wider mode which will not
3221 match with the mode of the other source of the conditional move, so
3222 we'll end up trying to emit r4:HI = cond ? (r1:SI) : (r3:HI).
3223 Wrap the two cmove operands into subregs if appropriate to prevent
3224 that. */
3225 if (GET_MODE (new_val) != GET_MODE (temp))
3226 {
3227 machine_mode src_mode = GET_MODE (new_val);
3228 machine_mode dst_mode = GET_MODE (temp);
3229 if (GET_MODE_SIZE (src_mode) <= GET_MODE_SIZE (dst_mode))
3230 {
3231 end_sequence ();
3232 return FALSE;
3233 }
3234 new_val = lowpart_subreg (dst_mode, new_val, src_mode);
3235 }
3236 if (GET_MODE (old_val) != GET_MODE (temp))
3237 {
3238 machine_mode src_mode = GET_MODE (old_val);
3239 machine_mode dst_mode = GET_MODE (temp);
3240 if (GET_MODE_SIZE (src_mode) <= GET_MODE_SIZE (dst_mode))
3241 {
3242 end_sequence ();
3243 return FALSE;
3244 }
3245 old_val = lowpart_subreg (dst_mode, old_val, src_mode);
3246 }
3247
3248 /* Actually emit the conditional move. */
3249 rtx temp_dest = noce_emit_cmove (if_info, temp, cond_code,
3250 x, y, new_val, old_val);
3251
3252 /* If we failed to expand the conditional move, drop out and don't
3253 try to continue. */
3254 if (temp_dest == NULL_RTX)
3255 {
3256 end_sequence ();
3257 return FALSE;
3258 }
3259
3260 /* Bookkeeping. */
3261 count++;
3262 targets.safe_push (target);
3263 temporaries.safe_push (temp_dest);
3264 unmodified_insns.safe_push (insn);
3265 }
3266
3267 /* We must have seen some sort of insn to insert, otherwise we were
3268 given an empty BB to convert, and we can't handle that. */
3269 gcc_assert (!unmodified_insns.is_empty ());
3270
3271 /* Now fixup the assignments. */
3272 for (int i = 0; i < count; i++)
3273 noce_emit_move_insn (targets[i], temporaries[i]);
3274
3275 /* Actually emit the sequence if it isn't too expensive. */
3276 rtx_insn *seq = get_insns ();
3277
3278 if (!noce_conversion_profitable_p (seq, if_info))
3279 {
3280 end_sequence ();
3281 return FALSE;
3282 }
3283
3284 for (insn = seq; insn; insn = NEXT_INSN (insn))
3285 set_used_flags (insn);
3286
3287 /* Mark all our temporaries and targets as used. */
3288 for (int i = 0; i < count; i++)
3289 {
3290 set_used_flags (temporaries[i]);
3291 set_used_flags (targets[i]);
3292 }
3293
3294 set_used_flags (cond);
3295 set_used_flags (x);
3296 set_used_flags (y);
3297
3298 unshare_all_rtl_in_chain (seq);
3299 end_sequence ();
3300
3301 if (!seq)
3302 return FALSE;
3303
3304 for (insn = seq; insn; insn = NEXT_INSN (insn))
3305 if (JUMP_P (insn)
3306 || recog_memoized (insn) == -1)
3307 return FALSE;
3308
3309 emit_insn_before_setloc (seq, if_info->jump,
3310 INSN_LOCATION (unmodified_insns.last ()));
3311
3312 /* Clean up THEN_BB and the edges in and out of it. */
3313 remove_edge (find_edge (test_bb, join_bb));
3314 remove_edge (find_edge (then_bb, join_bb));
3315 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3316 delete_basic_block (then_bb);
3317 num_true_changes++;
3318
3319 /* Maybe merge blocks now that the jump is simple enough. */
3320 if (can_merge_blocks_p (test_bb, join_bb))
3321 {
3322 merge_blocks (test_bb, join_bb);
3323 num_true_changes++;
3324 }
3325
3326 num_updated_if_blocks++;
3327 if_info->transform_name = "noce_convert_multiple_sets";
3328 return TRUE;
3329 }
3330
3331 /* Return true iff basic block TEST_BB consists only of
3332 (SET (REG) (REG)) insns suitable for conversion to a series
3333 of conditional moves. Also check that we have more than one set
3334 (other routines can handle a single set better than we would), and
3335 fewer than PARAM_MAX_RTL_IF_CONVERSION_INSNS sets. */
3336
3337 static bool
3338 bb_ok_for_noce_convert_multiple_sets (basic_block test_bb)
3339 {
3340 rtx_insn *insn;
3341 unsigned count = 0;
3342 unsigned param = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
3343
3344 FOR_BB_INSNS (test_bb, insn)
3345 {
3346 /* Skip over notes etc. */
3347 if (!active_insn_p (insn))
3348 continue;
3349
3350 /* We only handle SET insns. */
3351 rtx set = single_set (insn);
3352 if (set == NULL_RTX)
3353 return false;
3354
3355 rtx dest = SET_DEST (set);
3356 rtx src = SET_SRC (set);
3357
3358 /* We can possibly relax this, but for now only handle REG to REG
3359 (including subreg) moves. This avoids any issues that might come
3360 from introducing loads/stores that could violate data-race-freedom
3361 guarantees. */
3362 if (!REG_P (dest))
3363 return false;
3364
3365 if (!(REG_P (src)
3366 || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))
3367 && subreg_lowpart_p (src))))
3368 return false;
3369
3370 /* Destination must be appropriate for a conditional write. */
3371 if (!noce_operand_ok (dest))
3372 return false;
3373
3374 /* We must be able to conditionally move in this mode. */
3375 if (!can_conditionally_move_p (GET_MODE (dest)))
3376 return false;
3377
3378 count++;
3379 }
3380
3381 /* If we would only put out one conditional move, the other strategies
3382 this pass tries are better optimized and will be more appropriate.
3383 Some targets want to strictly limit the number of conditional moves
3384 that are emitted; they set this limit through PARAM, and we need
3385 to respect that. */
3386 return count > 1 && count <= param;
3387 }
3388
3389 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3390 it without using conditional execution. Return TRUE if we were successful
3391 at converting the block. */
3392
3393 static int
3394 noce_process_if_block (struct noce_if_info *if_info)
3395 {
3396 basic_block test_bb = if_info->test_bb; /* test block */
3397 basic_block then_bb = if_info->then_bb; /* THEN */
3398 basic_block else_bb = if_info->else_bb; /* ELSE or NULL */
3399 basic_block join_bb = if_info->join_bb; /* JOIN */
3400 rtx_insn *jump = if_info->jump;
3401 rtx cond = if_info->cond;
3402 rtx_insn *insn_a, *insn_b;
3403 rtx set_a, set_b;
3404 rtx orig_x, x, a, b;
3405
3406 /* We're looking for patterns of the form
3407
3408 (1) if (...) x = a; else x = b;
3409 (2) x = b; if (...) x = a;
3410 (3) if (...) x = a; // as if with an initial x = x.
3411 (4) if (...) { x = a; y = b; z = c; } // Like 3, for multiple SETS.
3412 The later patterns require jumps to be more expensive.
3413 For the if (...) x = a; else x = b; case we allow multiple insns
3414 inside the then and else blocks as long as their only effect is
3415 to calculate a value for x.
3416 ??? For future expansion, further expand the "multiple X" rules. */
3417
3418 /* First look for multiple SETS. */
3419 if (!else_bb
3420 && HAVE_conditional_move
3421 && !HAVE_cc0
3422 && bb_ok_for_noce_convert_multiple_sets (then_bb))
3423 {
3424 if (noce_convert_multiple_sets (if_info))
3425 {
3426 if (dump_file && if_info->transform_name)
3427 fprintf (dump_file, "if-conversion succeeded through %s\n",
3428 if_info->transform_name);
3429 return TRUE;
3430 }
3431 }
3432
3433 bool speed_p = optimize_bb_for_speed_p (test_bb);
3434 unsigned int then_cost = 0, else_cost = 0;
3435 if (!bb_valid_for_noce_process_p (then_bb, cond, &then_cost,
3436 &if_info->then_simple))
3437 return false;
3438
3439 if (else_bb
3440 && !bb_valid_for_noce_process_p (else_bb, cond, &else_cost,
3441 &if_info->else_simple))
3442 return false;
3443
3444 if (else_bb == NULL)
3445 if_info->original_cost += then_cost;
3446 else if (speed_p)
3447 if_info->original_cost += MIN (then_cost, else_cost);
3448 else
3449 if_info->original_cost += then_cost + else_cost;
3450
3451 insn_a = last_active_insn (then_bb, FALSE);
3452 set_a = single_set (insn_a);
3453 gcc_assert (set_a);
3454
3455 x = SET_DEST (set_a);
3456 a = SET_SRC (set_a);
3457
3458 /* Look for the other potential set. Make sure we've got equivalent
3459 destinations. */
3460 /* ??? This is overconservative. Storing to two different mems is
3461 as easy as conditionally computing the address. Storing to a
3462 single mem merely requires a scratch memory to use as one of the
3463 destination addresses; often the memory immediately below the
3464 stack pointer is available for this. */
3465 set_b = NULL_RTX;
3466 if (else_bb)
3467 {
3468 insn_b = last_active_insn (else_bb, FALSE);
3469 set_b = single_set (insn_b);
3470 gcc_assert (set_b);
3471
3472 if (!rtx_interchangeable_p (x, SET_DEST (set_b)))
3473 return FALSE;
3474 }
3475 else
3476 {
3477 insn_b = prev_nonnote_nondebug_insn (if_info->cond_earliest);
3478 /* We're going to be moving the evaluation of B down from above
3479 COND_EARLIEST to JUMP. Make sure the relevant data is still
3480 intact. */
3481 if (! insn_b
3482 || BLOCK_FOR_INSN (insn_b) != BLOCK_FOR_INSN (if_info->cond_earliest)
3483 || !NONJUMP_INSN_P (insn_b)
3484 || (set_b = single_set (insn_b)) == NULL_RTX
3485 || ! rtx_interchangeable_p (x, SET_DEST (set_b))
3486 || ! noce_operand_ok (SET_SRC (set_b))
3487 || reg_overlap_mentioned_p (x, SET_SRC (set_b))
3488 || modified_between_p (SET_SRC (set_b), insn_b, jump)
3489 /* Avoid extending the lifetime of hard registers on small
3490 register class machines. */
3491 || (REG_P (SET_SRC (set_b))
3492 && HARD_REGISTER_P (SET_SRC (set_b))
3493 && targetm.small_register_classes_for_mode_p
3494 (GET_MODE (SET_SRC (set_b))))
3495 /* Likewise with X. In particular this can happen when
3496 noce_get_condition looks farther back in the instruction
3497 stream than one might expect. */
3498 || reg_overlap_mentioned_p (x, cond)
3499 || reg_overlap_mentioned_p (x, a)
3500 || modified_between_p (x, insn_b, jump))
3501 {
3502 insn_b = NULL;
3503 set_b = NULL_RTX;
3504 }
3505 }
3506
3507 /* If x has side effects then only the if-then-else form is safe to
3508 convert. But even in that case we would need to restore any notes
3509 (such as REG_INC) at the end. That can be tricky if
3510 noce_emit_move_insn expands to more than one insn, so disable the
3511 optimization entirely for now if there are side effects. */
3512 if (side_effects_p (x))
3513 return FALSE;
3514
3515 b = (set_b ? SET_SRC (set_b) : x);
3516
3517 /* Only operate on register destinations, and even then avoid extending
3518 the lifetime of hard registers on small register class machines. */
3519 orig_x = x;
3520 if_info->orig_x = orig_x;
3521 if (!REG_P (x)
3522 || (HARD_REGISTER_P (x)
3523 && targetm.small_register_classes_for_mode_p (GET_MODE (x))))
3524 {
3525 if (GET_MODE (x) == BLKmode)
3526 return FALSE;
3527
3528 if (GET_CODE (x) == ZERO_EXTRACT
3529 && (!CONST_INT_P (XEXP (x, 1))
3530 || !CONST_INT_P (XEXP (x, 2))))
3531 return FALSE;
3532
3533 x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART
3534 ? XEXP (x, 0) : x));
3535 }
3536
3537 /* Don't operate on sources that may trap or are volatile. */
3538 if (! noce_operand_ok (a) || ! noce_operand_ok (b))
3539 return FALSE;
3540
3541 retry:
3542 /* Set up the info block for our subroutines. */
3543 if_info->insn_a = insn_a;
3544 if_info->insn_b = insn_b;
3545 if_info->x = x;
3546 if_info->a = a;
3547 if_info->b = b;
3548
3549 /* Try optimizations in some approximation of a useful order. */
3550 /* ??? Should first look to see if X is live incoming at all. If it
3551 isn't, we don't need anything but an unconditional set. */
3552
3553 /* Look and see if A and B are really the same. Avoid creating silly
3554 cmove constructs that no one will fix up later. */
3555 if (noce_simple_bbs (if_info)
3556 && rtx_interchangeable_p (a, b))
3557 {
3558 /* If we have an INSN_B, we don't have to create any new rtl. Just
3559 move the instruction that we already have. If we don't have an
3560 INSN_B, that means that A == X, and we've got a noop move. In
3561 that case don't do anything and let the code below delete INSN_A. */
3562 if (insn_b && else_bb)
3563 {
3564 rtx note;
3565
3566 if (else_bb && insn_b == BB_END (else_bb))
3567 BB_END (else_bb) = PREV_INSN (insn_b);
3568 reorder_insns (insn_b, insn_b, PREV_INSN (jump));
3569
3570 /* If there was a REG_EQUAL note, delete it since it may have been
3571 true due to this insn being after a jump. */
3572 if ((note = find_reg_note (insn_b, REG_EQUAL, NULL_RTX)) != 0)
3573 remove_note (insn_b, note);
3574
3575 insn_b = NULL;
3576 }
3577 /* If we have "x = b; if (...) x = a;", and x has side-effects, then
3578 x must be executed twice. */
3579 else if (insn_b && side_effects_p (orig_x))
3580 return FALSE;
3581
3582 x = orig_x;
3583 goto success;
3584 }
3585
3586 if (!set_b && MEM_P (orig_x))
3587 /* We want to avoid store speculation to avoid cases like
3588 if (pthread_mutex_trylock(mutex))
3589 ++global_variable;
3590 Rather than go to much effort here, we rely on the SSA optimizers,
3591 which do a good enough job these days. */
3592 return FALSE;
3593
3594 if (noce_try_move (if_info))
3595 goto success;
3596 if (noce_try_ifelse_collapse (if_info))
3597 goto success;
3598 if (noce_try_store_flag (if_info))
3599 goto success;
3600 if (noce_try_bitop (if_info))
3601 goto success;
3602 if (noce_try_minmax (if_info))
3603 goto success;
3604 if (noce_try_abs (if_info))
3605 goto success;
3606 if (noce_try_inverse_constants (if_info))
3607 goto success;
3608 if (!targetm.have_conditional_execution ()
3609 && noce_try_store_flag_constants (if_info))
3610 goto success;
3611 if (HAVE_conditional_move
3612 && noce_try_cmove (if_info))
3613 goto success;
3614 if (! targetm.have_conditional_execution ())
3615 {
3616 if (noce_try_addcc (if_info))
3617 goto success;
3618 if (noce_try_store_flag_mask (if_info))
3619 goto success;
3620 if (HAVE_conditional_move
3621 && noce_try_cmove_arith (if_info))
3622 goto success;
3623 if (noce_try_sign_mask (if_info))
3624 goto success;
3625 }
3626
3627 if (!else_bb && set_b)
3628 {
3629 insn_b = NULL;
3630 set_b = NULL_RTX;
3631 b = orig_x;
3632 goto retry;
3633 }
3634
3635 return FALSE;
3636
3637 success:
3638 if (dump_file && if_info->transform_name)
3639 fprintf (dump_file, "if-conversion succeeded through %s\n",
3640 if_info->transform_name);
3641
3642 /* If we used a temporary, fix it up now. */
3643 if (orig_x != x)
3644 {
3645 rtx_insn *seq;
3646
3647 start_sequence ();
3648 noce_emit_move_insn (orig_x, x);
3649 seq = get_insns ();
3650 set_used_flags (orig_x);
3651 unshare_all_rtl_in_chain (seq);
3652 end_sequence ();
3653
3654 emit_insn_before_setloc (seq, BB_END (test_bb), INSN_LOCATION (insn_a));
3655 }
3656
3657 /* The original THEN and ELSE blocks may now be removed. The test block
3658 must now jump to the join block. If the test block and the join block
3659 can be merged, do so. */
3660 if (else_bb)
3661 {
3662 delete_basic_block (else_bb);
3663 num_true_changes++;
3664 }
3665 else
3666 remove_edge (find_edge (test_bb, join_bb));
3667
3668 remove_edge (find_edge (then_bb, join_bb));
3669 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3670 delete_basic_block (then_bb);
3671 num_true_changes++;
3672
3673 if (can_merge_blocks_p (test_bb, join_bb))
3674 {
3675 merge_blocks (test_bb, join_bb);
3676 num_true_changes++;
3677 }
3678
3679 num_updated_if_blocks++;
3680 return TRUE;
3681 }
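
/* Editorial illustration of pattern (2) above (hypothetical names):

     x = b;        <- insn_b, found just above the condition
     if (test)
       x = a;      <- the THEN block

   is handled like "if (test) x = a; else x = b;" by moving the
   evaluation of b down to the jump, which is only safe when nothing
   between insn_b and the jump modifies b or x.  */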
3682
3683 /* Check whether a block is suitable for conditional move conversion.
3684 Every insn must be a simple set of a register to a constant or a
3685 register. For each assignment, store the value in the pointer map
3686 VALS, keyed indexed by register pointer, then store the register
3687 pointer in REGS. COND is the condition we will test. */
3688
3689 static int
3690 check_cond_move_block (basic_block bb,
3691 hash_map<rtx, rtx> *vals,
3692 vec<rtx> *regs,
3693 rtx cond)
3694 {
3695 rtx_insn *insn;
3696 rtx cc = cc_in_cond (cond);
3697
3698 /* We can only handle simple jumps at the end of the basic block.
3699 It is almost impossible to update the CFG otherwise. */
3700 insn = BB_END (bb);
3701 if (JUMP_P (insn) && !onlyjump_p (insn))
3702 return FALSE;
3703
3704 FOR_BB_INSNS (bb, insn)
3705 {
3706 rtx set, dest, src;
3707
3708 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3709 continue;
3710 set = single_set (insn);
3711 if (!set)
3712 return FALSE;
3713
3714 dest = SET_DEST (set);
3715 src = SET_SRC (set);
3716 if (!REG_P (dest)
3717 || (HARD_REGISTER_P (dest)
3718 && targetm.small_register_classes_for_mode_p (GET_MODE (dest))))
3719 return FALSE;
3720
3721 if (!CONSTANT_P (src) && !register_operand (src, VOIDmode))
3722 return FALSE;
3723
3724 if (side_effects_p (src) || side_effects_p (dest))
3725 return FALSE;
3726
3727 if (may_trap_p (src) || may_trap_p (dest))
3728 return FALSE;
3729
3730 /* Don't try to handle this if the source register was
3731 modified earlier in the block. */
3732 if ((REG_P (src)
3733 && vals->get (src))
3734 || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))
3735 && vals->get (SUBREG_REG (src))))
3736 return FALSE;
3737
3738 /* Don't try to handle this if the destination register was
3739 modified earlier in the block. */
3740 if (vals->get (dest))
3741 return FALSE;
3742
3743 /* Don't try to handle this if the condition uses the
3744 destination register. */
3745 if (reg_overlap_mentioned_p (dest, cond))
3746 return FALSE;
3747
3748 /* Don't try to handle this if the source register is modified
3749 later in the block. */
3750 if (!CONSTANT_P (src)
3751 && modified_between_p (src, insn, NEXT_INSN (BB_END (bb))))
3752 return FALSE;
3753
3754 /* Skip it if the instruction to be moved might clobber CC. */
3755 if (cc && set_of (cc, insn))
3756 return FALSE;
3757
3758 vals->put (dest, src);
3759
3760 regs->safe_push (dest);
3761 }
3762
3763 return TRUE;
3764 }
3765
3766 /* Given a basic block BB suitable for conditional move conversion,
3767 a condition COND, and pointer maps THEN_VALS and ELSE_VALS containing
3768 the register values depending on COND, emit the insns in the block as
3769 conditional moves. If ELSE_BLOCK_P is true, THEN_BB was already
3770 processed. The caller has started a sequence for the conversion.
3771 Return true if successful, false if something goes wrong. */
3772
3773 static bool
3774 cond_move_convert_if_block (struct noce_if_info *if_infop,
3775 basic_block bb, rtx cond,
3776 hash_map<rtx, rtx> *then_vals,
3777 hash_map<rtx, rtx> *else_vals,
3778 bool else_block_p)
3779 {
3780 enum rtx_code code;
3781 rtx_insn *insn;
3782 rtx cond_arg0, cond_arg1;
3783
3784 code = GET_CODE (cond);
3785 cond_arg0 = XEXP (cond, 0);
3786 cond_arg1 = XEXP (cond, 1);
3787
3788 FOR_BB_INSNS (bb, insn)
3789 {
3790 rtx set, target, dest, t, e;
3791
3792 /* ??? Maybe emit conditional debug insn? */
3793 if (!NONDEBUG_INSN_P (insn) || JUMP_P (insn))
3794 continue;
3795 set = single_set (insn);
3796 gcc_assert (set && REG_P (SET_DEST (set)));
3797
3798 dest = SET_DEST (set);
3799
3800 rtx *then_slot = then_vals->get (dest);
3801 rtx *else_slot = else_vals->get (dest);
3802 t = then_slot ? *then_slot : NULL_RTX;
3803 e = else_slot ? *else_slot : NULL_RTX;
3804
3805 if (else_block_p)
3806 {
3807 /* If this register was set in the then block, we already
3808 handled this case there. */
3809 if (t)
3810 continue;
3811 t = dest;
3812 gcc_assert (e);
3813 }
3814 else
3815 {
3816 gcc_assert (t);
3817 if (!e)
3818 e = dest;
3819 }
3820
3821 target = noce_emit_cmove (if_infop, dest, code, cond_arg0, cond_arg1,
3822 t, e);
3823 if (!target)
3824 return false;
3825
3826 if (target != dest)
3827 noce_emit_move_insn (dest, target);
3828 }
3829
3830 return true;
3831 }
3832
3833 /* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
3834 it using only conditional moves. Return TRUE if we were successful at
3835 converting the block. */
3836
3837 static int
3838 cond_move_process_if_block (struct noce_if_info *if_info)
3839 {
3840 basic_block test_bb = if_info->test_bb;
3841 basic_block then_bb = if_info->then_bb;
3842 basic_block else_bb = if_info->else_bb;
3843 basic_block join_bb = if_info->join_bb;
3844 rtx_insn *jump = if_info->jump;
3845 rtx cond = if_info->cond;
3846 rtx_insn *seq, *loc_insn;
3847 rtx reg;
3848 int c;
3849 vec<rtx> then_regs = vNULL;
3850 vec<rtx> else_regs = vNULL;
3851 unsigned int i;
3852 int success_p = FALSE;
3853 int limit = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
3854
3855 /* Build a mapping for each block to the value used for each
3856 register. */
3857 hash_map<rtx, rtx> then_vals;
3858 hash_map<rtx, rtx> else_vals;
3859
3860 /* Make sure the blocks are suitable. */
3861 if (!check_cond_move_block (then_bb, &then_vals, &then_regs, cond)
3862 || (else_bb
3863 && !check_cond_move_block (else_bb, &else_vals, &else_regs, cond)))
3864 goto done;
3865
3866 /* Make sure the blocks can be used together. If the same register
3867 is set in both blocks, and is not set to a constant in both
3868 cases, then both blocks must set it to the same register. We
3869 have already verified that if it is set to a register, the
3870 source register does not change after the assignment. Also count
3871 the number of registers set in only one of the blocks. */
3872 c = 0;
3873 FOR_EACH_VEC_ELT (then_regs, i, reg)
3874 {
3875 rtx *then_slot = then_vals.get (reg);
3876 rtx *else_slot = else_vals.get (reg);
3877
3878 gcc_checking_assert (then_slot);
3879 if (!else_slot)
3880 ++c;
3881 else
3882 {
3883 rtx then_val = *then_slot;
3884 rtx else_val = *else_slot;
3885 if (!CONSTANT_P (then_val) && !CONSTANT_P (else_val)
3886 && !rtx_equal_p (then_val, else_val))
3887 goto done;
3888 }
3889 }
3890
3891 /* Finish off c for MAX_CONDITIONAL_EXECUTE. */
3892 FOR_EACH_VEC_ELT (else_regs, i, reg)
3893 {
3894 gcc_checking_assert (else_vals.get (reg));
3895 if (!then_vals.get (reg))
3896 ++c;
3897 }
3898
3899 /* Make sure it is reasonable to convert this block. What matters
3900 is the number of assignments currently made in only one of the
3901 branches, since if we convert we will always execute
3902 them. */
3903 if (c > MAX_CONDITIONAL_EXECUTE
3904 || c > limit)
3905 goto done;
3906
3907 /* Try to emit the conditional moves. First do the then block,
3908 then do anything left in the else block. */
3909 start_sequence ();
3910 if (!cond_move_convert_if_block (if_info, then_bb, cond,
3911 &then_vals, &else_vals, false)
3912 || (else_bb
3913 && !cond_move_convert_if_block (if_info, else_bb, cond,
3914 &then_vals, &else_vals, true)))
3915 {
3916 end_sequence ();
3917 goto done;
3918 }
3919 seq = end_ifcvt_sequence (if_info);
3920 if (!seq)
3921 goto done;
3922
3923 loc_insn = first_active_insn (then_bb);
3924 if (!loc_insn)
3925 {
3926 loc_insn = first_active_insn (else_bb);
3927 gcc_assert (loc_insn);
3928 }
3929 emit_insn_before_setloc (seq, jump, INSN_LOCATION (loc_insn));
3930
3931 if (else_bb)
3932 {
3933 delete_basic_block (else_bb);
3934 num_true_changes++;
3935 }
3936 else
3937 remove_edge (find_edge (test_bb, join_bb));
3938
3939 remove_edge (find_edge (then_bb, join_bb));
3940 redirect_edge_and_branch_force (single_succ_edge (test_bb), join_bb);
3941 delete_basic_block (then_bb);
3942 num_true_changes++;
3943
3944 if (can_merge_blocks_p (test_bb, join_bb))
3945 {
3946 merge_blocks (test_bb, join_bb);
3947 num_true_changes++;
3948 }
3949
3950 num_updated_if_blocks++;
3951 success_p = TRUE;
3952
3953 done:
3954 then_regs.release ();
3955 else_regs.release ();
3956 return success_p;
3957 }
3958
3959 \f
3960 /* Determine if a given basic block heads a simple IF-THEN-JOIN or an
3961 IF-THEN-ELSE-JOIN block.
3962
3963 If so, we'll try to convert the insns to not require the branch,
3964 using only transformations that do not require conditional execution.
3965
3966 Return TRUE if we were successful at converting the block. */
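/* Illustrative sketches (pseudo-source, assumed examples) of the three
   shapes recognized below:

     IF-THEN-ELSE-JOIN:  if (test) goto E; x = a; goto J; E: x = b; J: ...
     IF-THEN-JOIN:       if (test) goto J; x = a; J: ...
     IF-ELSE-JOIN:       like IF-THEN-JOIN but with the branch and the
                         fallthrough swapped; we invert the condition and
                         proceed as for IF-THEN-JOIN. */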
3967
3968 static int
3969 noce_find_if_block (basic_block test_bb, edge then_edge, edge else_edge,
3970 int pass)
3971 {
3972 basic_block then_bb, else_bb, join_bb;
3973 bool then_else_reversed = false;
3974 rtx_insn *jump;
3975 rtx cond;
3976 rtx_insn *cond_earliest;
3977 struct noce_if_info if_info;
3978 bool speed_p = optimize_bb_for_speed_p (test_bb);
3979
3980 /* We should only ever get here before reload. */
3981 gcc_assert (!reload_completed);
3982
3983 /* Recognize an IF-THEN-ELSE-JOIN block. */
3984 if (single_pred_p (then_edge->dest)
3985 && single_succ_p (then_edge->dest)
3986 && single_pred_p (else_edge->dest)
3987 && single_succ_p (else_edge->dest)
3988 && single_succ (then_edge->dest) == single_succ (else_edge->dest))
3989 {
3990 then_bb = then_edge->dest;
3991 else_bb = else_edge->dest;
3992 join_bb = single_succ (then_bb);
3993 }
3994 /* Recognize an IF-THEN-JOIN block. */
3995 else if (single_pred_p (then_edge->dest)
3996 && single_succ_p (then_edge->dest)
3997 && single_succ (then_edge->dest) == else_edge->dest)
3998 {
3999 then_bb = then_edge->dest;
4000 else_bb = NULL_BLOCK;
4001 join_bb = else_edge->dest;
4002 }
4003 /* Recognize an IF-ELSE-JOIN block. We can have those because the order
4004 of basic blocks in cfglayout mode does not matter, so the fallthrough
4005 edge can go to any basic block (and not just to bb->next_bb, like in
4006 cfgrtl mode). */
4007 else if (single_pred_p (else_edge->dest)
4008 && single_succ_p (else_edge->dest)
4009 && single_succ (else_edge->dest) == then_edge->dest)
4010 {
4011 /* The noce transformations do not apply to IF-ELSE-JOIN blocks.
4012 To make this work, we have to invert the THEN and ELSE blocks
4013 and reverse the jump condition. */
4014 then_bb = else_edge->dest;
4015 else_bb = NULL_BLOCK;
4016 join_bb = single_succ (then_bb);
4017 then_else_reversed = true;
4018 }
4019 else
4020 /* Not a form we can handle. */
4021 return FALSE;
4022
4023 /* The successor edges of the THEN and ELSE blocks cannot be complex. */
4024 if (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
4025 return FALSE;
4026 if (else_bb
4027 && single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
4028 return FALSE;
4029
4030 num_possible_if_blocks++;
4031
4032 if (dump_file)
4033 {
4034 fprintf (dump_file,
4035 "\nIF-THEN%s-JOIN block found, pass %d, test %d, then %d",
4036 (else_bb) ? "-ELSE" : "",
4037 pass, test_bb->index, then_bb->index);
4038
4039 if (else_bb)
4040 fprintf (dump_file, ", else %d", else_bb->index);
4041
4042 fprintf (dump_file, ", join %d\n", join_bb->index);
4043 }
4044
4045 /* If the conditional jump is more than just a conditional
4046 jump, then we cannot do if-conversion on this block. */
4047 jump = BB_END (test_bb);
4048 if (! onlyjump_p (jump))
4049 return FALSE;
4050
4051 /* If this is not a standard conditional jump, we can't parse it. */
4052 cond = noce_get_condition (jump, &cond_earliest, then_else_reversed);
4053 if (!cond)
4054 return FALSE;
4055
4056 /* We must be comparing objects whose modes imply the size. */
4057 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
4058 return FALSE;
4059
4060 /* Initialize an IF_INFO struct to pass around. */
4061 memset (&if_info, 0, sizeof if_info);
4062 if_info.test_bb = test_bb;
4063 if_info.then_bb = then_bb;
4064 if_info.else_bb = else_bb;
4065 if_info.join_bb = join_bb;
4066 if_info.cond = cond;
4067 if_info.cond_earliest = cond_earliest;
4068 if_info.jump = jump;
4069 if_info.then_else_reversed = then_else_reversed;
4070 if_info.speed_p = speed_p;
4071 if_info.max_seq_cost
4072 = targetm.max_noce_ifcvt_seq_cost (then_edge);
4073 /* We'll add in the cost of THEN_BB and ELSE_BB later, when we check
4074 that they are valid to transform. We can't easily get back to the insn
4075 for COND (and it may not exist if we had to canonicalize to get COND),
4076 and jump_insns are always given a cost of 1 by seq_cost, so treat
4077 both instructions as having cost COSTS_N_INSNS (1). */
4078 if_info.original_cost = COSTS_N_INSNS (2);
4079
4081 /* Do the real work. */
4082
4083 if (noce_process_if_block (&if_info))
4084 return TRUE;
4085
4086 if (HAVE_conditional_move
4087 && cond_move_process_if_block (&if_info))
4088 return TRUE;
4089
4090 return FALSE;
4091 }
4092 \f
4093
4094 /* Merge the blocks and mark for local life update. */
4095
4096 static void
4097 merge_if_block (struct ce_if_block * ce_info)
4098 {
4099 basic_block test_bb = ce_info->test_bb; /* last test block */
4100 basic_block then_bb = ce_info->then_bb; /* THEN */
4101 basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
4102 basic_block join_bb = ce_info->join_bb; /* join block */
4103 basic_block combo_bb;
4104
4105 /* All block merging is done into the lower block numbers. */
4106
4107 combo_bb = test_bb;
4108 df_set_bb_dirty (test_bb);
4109
4110 /* Merge any basic blocks to handle && and || subtests. Each of
4111 the blocks are on the fallthru path from the predecessor block. */
4112 if (ce_info->num_multiple_test_blocks > 0)
4113 {
4114 basic_block bb = test_bb;
4115 basic_block last_test_bb = ce_info->last_test_bb;
4116 basic_block fallthru = block_fallthru (bb);
4117
4118 do
4119 {
4120 bb = fallthru;
4121 fallthru = block_fallthru (bb);
4122 merge_blocks (combo_bb, bb);
4123 num_true_changes++;
4124 }
4125 while (bb != last_test_bb);
4126 }
4127
4128 /* Merge TEST block into THEN block. Normally the THEN block won't have a
4129 label, but it might if there were || tests. That label's count should be
4130 zero, and it normally should be removed. */
4131
4132 if (then_bb)
4133 {
4134 /* If THEN_BB has no successors, then there's a BARRIER after it.
4135 If COMBO_BB has more than one successor (THEN_BB), then that BARRIER
4136 is no longer needed, and in fact it is incorrect to leave it in
4137 the insn stream. */
4138 if (EDGE_COUNT (then_bb->succs) == 0
4139 && EDGE_COUNT (combo_bb->succs) > 1)
4140 {
4141 rtx_insn *end = NEXT_INSN (BB_END (then_bb));
4142 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
4143 end = NEXT_INSN (end);
4144
4145 if (end && BARRIER_P (end))
4146 delete_insn (end);
4147 }
4148 merge_blocks (combo_bb, then_bb);
4149 num_true_changes++;
4150 }
4151
4152 /* The ELSE block, if it existed, had a label. That label count
4153 will almost always be zero, but odd things can happen when labels
4154 get their addresses taken. */
4155 if (else_bb)
4156 {
4157 /* If ELSE_BB has no successors, then there's a BARRIER after it.
4158 If COMBO_BB has more than one successor (ELSE_BB), then that BARRIER
4159 is no longer needed, and in fact it is incorrect to leave it in
4160 the insn stream. */
4161 if (EDGE_COUNT (else_bb->succs) == 0
4162 && EDGE_COUNT (combo_bb->succs) > 1)
4163 {
4164 rtx_insn *end = NEXT_INSN (BB_END (else_bb));
4165 while (end && NOTE_P (end) && !NOTE_INSN_BASIC_BLOCK_P (end))
4166 end = NEXT_INSN (end);
4167
4168 if (end && BARRIER_P (end))
4169 delete_insn (end);
4170 }
4171 merge_blocks (combo_bb, else_bb);
4172 num_true_changes++;
4173 }
4174
4175 /* If there was no join block reported, that means it was not adjacent
4176 to the others, and so we cannot merge them. */
4177
4178 if (! join_bb)
4179 {
4180 rtx_insn *last = BB_END (combo_bb);
4181
4182 /* The outgoing edge for the current COMBO block should already
4183 be correct. Verify this. */
4184 if (EDGE_COUNT (combo_bb->succs) == 0)
4185 gcc_assert (find_reg_note (last, REG_NORETURN, NULL)
4186 || (NONJUMP_INSN_P (last)
4187 && GET_CODE (PATTERN (last)) == TRAP_IF
4188 && (TRAP_CONDITION (PATTERN (last))
4189 == const_true_rtx)));
4190
4191 else
4192 /* There should still be something at the end of the THEN or ELSE
4193 blocks taking us to our final destination. */
4194 gcc_assert (JUMP_P (last)
4195 || (EDGE_SUCC (combo_bb, 0)->dest
4196 == EXIT_BLOCK_PTR_FOR_FN (cfun)
4197 && CALL_P (last)
4198 && SIBLING_CALL_P (last))
4199 || ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
4200 && can_throw_internal (last)));
4201 }
4202
4203 /* The JOIN block may have had quite a number of other predecessors too.
4204 Since we've already merged the TEST, THEN and ELSE blocks, we should
4205 have only one remaining edge from our if-then-else diamond. If there
4206 is more than one remaining edge, it must come from elsewhere. There
4207 may be zero incoming edges if the THEN block didn't actually join
4208 back up (as with a call to a noreturn function). */
4209 else if (EDGE_COUNT (join_bb->preds) < 2
4210 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4211 {
4212 /* We can merge the JOIN cleanly and update the dataflow, then
4213 try again on this pass. */
4214 merge_blocks (combo_bb, join_bb);
4215 num_true_changes++;
4216 }
4217 else
4218 {
4219 /* We cannot merge the JOIN. */
4220
4221 /* The outgoing edge for the current COMBO block should already
4222 be correct. Verify this. */
4223 gcc_assert (single_succ_p (combo_bb)
4224 && single_succ (combo_bb) == join_bb);
4225
4226 /* Remove the jump and cruft from the end of the COMBO block. */
4227 if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4228 tidy_fallthru_edge (single_succ_edge (combo_bb));
4229 }
4230
4231 num_updated_if_blocks++;
4232 }
4233 \f
4234 /* Find a block ending in a simple IF condition and try to transform it
4235 in some way. When converting a multi-block condition, put the new code
4236 in the first such block and delete the rest. Return a pointer to this
4237 first block if some transformation was done. Return NULL otherwise. */
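/* An illustrative sketch of the canonicalization below: in

	bb0:  if (x == 0) goto bb2;
	bb1:  ...		(fallthrough successor: canonical THEN)
	bb2:  ...		(branch target: ELSE)

   the fallthrough edge is taken as the THEN edge and the branch edge
   as the ELSE edge, swapping them if they arrive reversed. */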
4238
4239 static basic_block
4240 find_if_header (basic_block test_bb, int pass)
4241 {
4242 ce_if_block ce_info;
4243 edge then_edge;
4244 edge else_edge;
4245
4246 /* The kind of block we're looking for has exactly two successors. */
4247 if (EDGE_COUNT (test_bb->succs) != 2)
4248 return NULL;
4249
4250 then_edge = EDGE_SUCC (test_bb, 0);
4251 else_edge = EDGE_SUCC (test_bb, 1);
4252
4253 if (df_get_bb_dirty (then_edge->dest))
4254 return NULL;
4255 if (df_get_bb_dirty (else_edge->dest))
4256 return NULL;
4257
4258 /* Neither edge should be abnormal. */
4259 if ((then_edge->flags & EDGE_COMPLEX)
4260 || (else_edge->flags & EDGE_COMPLEX))
4261 return NULL;
4262
4263 /* Nor exit the loop. */
4264 if ((then_edge->flags & EDGE_LOOP_EXIT)
4265 || (else_edge->flags & EDGE_LOOP_EXIT))
4266 return NULL;
4267
4268 /* The THEN edge is canonically the one that falls through. */
4269 if (then_edge->flags & EDGE_FALLTHRU)
4270 ;
4271 else if (else_edge->flags & EDGE_FALLTHRU)
4272 std::swap (then_edge, else_edge);
4273 else
4274 /* Otherwise this must be a multiway branch of some sort. */
4275 return NULL;
4276
4277 memset (&ce_info, 0, sizeof (ce_info));
4278 ce_info.test_bb = test_bb;
4279 ce_info.then_bb = then_edge->dest;
4280 ce_info.else_bb = else_edge->dest;
4281 ce_info.pass = pass;
4282
4283 #ifdef IFCVT_MACHDEP_INIT
4284 IFCVT_MACHDEP_INIT (&ce_info);
4285 #endif
4286
4287 if (!reload_completed
4288 && noce_find_if_block (test_bb, then_edge, else_edge, pass))
4289 goto success;
4290
4291 if (reload_completed
4292 && targetm.have_conditional_execution ()
4293 && cond_exec_find_if_block (&ce_info))
4294 goto success;
4295
4296 if (targetm.have_trap ()
4297 && optab_handler (ctrap_optab, word_mode) != CODE_FOR_nothing
4298 && find_cond_trap (test_bb, then_edge, else_edge))
4299 goto success;
4300
4301 if (dom_info_state (CDI_POST_DOMINATORS) >= DOM_NO_FAST_QUERY
4302 && (reload_completed || !targetm.have_conditional_execution ()))
4303 {
4304 if (find_if_case_1 (test_bb, then_edge, else_edge))
4305 goto success;
4306 if (find_if_case_2 (test_bb, then_edge, else_edge))
4307 goto success;
4308 }
4309
4310 return NULL;
4311
4312 success:
4313 if (dump_file)
4314 fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass);
4315 /* Set this so we continue looking. */
4316 cond_exec_changed_p = TRUE;
4317 return ce_info.test_bb;
4318 }
4319
4320 /* Check whether CUR_BB has two successor edges, one of which falls through
4321 to the next block and the other of which jumps to TARGET_BB, so that we
4322 can tell if the block is part of an && test or an || test. Return -1 if
4323 not, otherwise the number of non-note, non-jump, non-USE/CLOBBER insns. */
4324
4325 static int
4326 block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb)
4327 {
4328 edge cur_edge;
4329 int fallthru_p = FALSE;
4330 int jump_p = FALSE;
4331 rtx_insn *insn;
4332 rtx_insn *end;
4333 int n_insns = 0;
4334 edge_iterator ei;
4335
4336 if (!cur_bb || !target_bb)
4337 return -1;
4338
4339 /* If no edges, it obviously doesn't jump or fallthru; this is a failure. */
4340 if (EDGE_COUNT (cur_bb->succs) == 0)
4341 return -1;
4342
4343 FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs)
4344 {
4345 if (cur_edge->flags & EDGE_COMPLEX)
4346 /* Anything complex isn't what we want. */
4347 return -1;
4348
4349 else if (cur_edge->flags & EDGE_FALLTHRU)
4350 fallthru_p = TRUE;
4351
4352 else if (cur_edge->dest == target_bb)
4353 jump_p = TRUE;
4354
4355 else
4356 return -1;
4357 }
4358
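  /* Require both; since jump_p and fallthru_p are each 0 or 1 here,
     the bitwise AND below coincides with a logical AND. */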
4359 if ((jump_p & fallthru_p) == 0)
4360 return -1;
4361
4362 /* Don't allow calls in the block, since this is used to group && and ||
4363 together for conditional execution support. ??? We should support
4364 conditional execution across calls for IA-64 some day, but for now
4365 this restriction keeps the code simpler. */
4366 end = BB_END (cur_bb);
4367 insn = BB_HEAD (cur_bb);
4368
4369 while (insn != NULL_RTX)
4370 {
4371 if (CALL_P (insn))
4372 return -1;
4373
4374 if (INSN_P (insn)
4375 && !JUMP_P (insn)
4376 && !DEBUG_INSN_P (insn)
4377 && GET_CODE (PATTERN (insn)) != USE
4378 && GET_CODE (PATTERN (insn)) != CLOBBER)
4379 n_insns++;
4380
4381 if (insn == end)
4382 break;
4383
4384 insn = NEXT_INSN (insn);
4385 }
4386
4387 return n_insns;
4388 }
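
/* For illustration (a hedged sketch): the source

	if (a && b)
	  x = 1;

   yields two test blocks, the first jumping to the join block when !a
   and falling through to the second test otherwise; recognizing the
   first block here lets cond_exec_find_if_block predicate the whole
   && chain as a single unit. */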
4389
4390 /* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE
4391 block. If so, we'll try to convert the insns to not require the branch.
4392 Return TRUE if we were successful at converting the block. */
4393
4394 static int
4395 cond_exec_find_if_block (struct ce_if_block * ce_info)
4396 {
4397 basic_block test_bb = ce_info->test_bb;
4398 basic_block then_bb = ce_info->then_bb;
4399 basic_block else_bb = ce_info->else_bb;
4400 basic_block join_bb = NULL_BLOCK;
4401 edge cur_edge;
4402 basic_block next;
4403 edge_iterator ei;
4404
4405 ce_info->last_test_bb = test_bb;
4406
4407 /* We should only ever get here after reload,
4408 and only if the target has conditional execution. */
4409 gcc_assert (reload_completed && targetm.have_conditional_execution ());
4410
4411 /* Discover if any fall through predecessors of the current test basic block
4412 were && tests (which jump to the else block) or || tests (which jump to
4413 the then block). */
4414 if (single_pred_p (test_bb)
4415 && single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
4416 {
4417 basic_block bb = single_pred (test_bb);
4418 basic_block target_bb;
4419 int max_insns = MAX_CONDITIONAL_EXECUTE;
4420 int n_insns;
4421
4422 /* Determine if the preceding block is an && or || block. */
4423 if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
4424 {
4425 ce_info->and_and_p = TRUE;
4426 target_bb = else_bb;
4427 }
4428 else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0)
4429 {
4430 ce_info->and_and_p = FALSE;
4431 target_bb = then_bb;
4432 }
4433 else
4434 target_bb = NULL_BLOCK;
4435
4436 if (target_bb && n_insns <= max_insns)
4437 {
4438 int total_insns = 0;
4439 int blocks = 0;
4440
4441 ce_info->last_test_bb = test_bb;
4442
4443 /* Found at least one && or || block, look for more. */
4444 do
4445 {
4446 ce_info->test_bb = test_bb = bb;
4447 total_insns += n_insns;
4448 blocks++;
4449
4450 if (!single_pred_p (bb))
4451 break;
4452
4453 bb = single_pred (bb);
4454 n_insns = block_jumps_and_fallthru_p (bb, target_bb);
4455 }
4456 while (n_insns >= 0 && (total_insns + n_insns) <= max_insns);
4457
4458 ce_info->num_multiple_test_blocks = blocks;
4459 ce_info->num_multiple_test_insns = total_insns;
4460
4461 if (ce_info->and_and_p)
4462 ce_info->num_and_and_blocks = blocks;
4463 else
4464 ce_info->num_or_or_blocks = blocks;
4465 }
4466 }
4467
4468 /* The THEN block of an IF-THEN combo must have exactly one predecessor,
4469 other than any || blocks which jump to the THEN block. */
4470 if ((EDGE_COUNT (then_bb->preds) - ce_info->num_or_or_blocks) != 1)
4471 return FALSE;
4472
4473 /* The incoming edges of the THEN and ELSE blocks cannot be complex. */
4474 FOR_EACH_EDGE (cur_edge, ei, then_bb->preds)
4475 {
4476 if (cur_edge->flags & EDGE_COMPLEX)
4477 return FALSE;
4478 }
4479
4480 FOR_EACH_EDGE (cur_edge, ei, else_bb->preds)
4481 {
4482 if (cur_edge->flags & EDGE_COMPLEX)
4483 return FALSE;
4484 }
4485
4486 /* The THEN block of an IF-THEN combo must have zero or one successors. */
4487 if (EDGE_COUNT (then_bb->succs) > 0
4488 && (!single_succ_p (then_bb)
4489 || (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
4490 || (epilogue_completed
4491 && tablejump_p (BB_END (then_bb), NULL, NULL))))
4492 return FALSE;
4493
4494 /* If the THEN block has no successors, conditional execution can still
4495 make a conditional call. Don't do this unless the ELSE block has
4496 only one incoming edge -- the CFG manipulation is too ugly otherwise.
4497 Check for the last insn of the THEN block being an indirect jump, which
4498 is listed as not having any successors, but confuses the rest of the CE
4499 code processing. ??? We should fix this in the future. */
4500 if (EDGE_COUNT (then_bb->succs) == 0)
4501 {
4502 if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4503 {
4504 rtx_insn *last_insn = BB_END (then_bb);
4505
4506 while (last_insn
4507 && NOTE_P (last_insn)
4508 && last_insn != BB_HEAD (then_bb))
4509 last_insn = PREV_INSN (last_insn);
4510
4511 if (last_insn
4512 && JUMP_P (last_insn)
4513 && ! simplejump_p (last_insn))
4514 return FALSE;
4515
4516 join_bb = else_bb;
4517 else_bb = NULL_BLOCK;
4518 }
4519 else
4520 return FALSE;
4521 }
4522
4523 /* If the THEN block's successor is the other edge out of the TEST block,
4524 then we have an IF-THEN combo without an ELSE. */
4525 else if (single_succ (then_bb) == else_bb)
4526 {
4527 join_bb = else_bb;
4528 else_bb = NULL_BLOCK;
4529 }
4530
4531 /* If the THEN and ELSE blocks meet in a subsequent block, and the ELSE
4532 has exactly one predecessor and one successor, and the outgoing edge
4533 is not complex, then we have an IF-THEN-ELSE combo. */
4534 else if (single_succ_p (else_bb)
4535 && single_succ (then_bb) == single_succ (else_bb)
4536 && single_pred_p (else_bb)
4537 && !(single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
4538 && !(epilogue_completed
4539 && tablejump_p (BB_END (else_bb), NULL, NULL)))
4540 join_bb = single_succ (else_bb);
4541
4542 /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
4543 else
4544 return FALSE;
4545
4546 num_possible_if_blocks++;
4547
4548 if (dump_file)
4549 {
4550 fprintf (dump_file,
4551 "\nIF-THEN%s block found, pass %d, start block %d "
4552 "[insn %d], then %d [%d]",
4553 (else_bb) ? "-ELSE" : "",
4554 ce_info->pass,
4555 test_bb->index,
4556 BB_HEAD (test_bb) ? (int)INSN_UID (BB_HEAD (test_bb)) : -1,
4557 then_bb->index,
4558 BB_HEAD (then_bb) ? (int)INSN_UID (BB_HEAD (then_bb)) : -1);
4559
4560 if (else_bb)
4561 fprintf (dump_file, ", else %d [%d]",
4562 else_bb->index,
4563 BB_HEAD (else_bb) ? (int)INSN_UID (BB_HEAD (else_bb)) : -1);
4564
4565 fprintf (dump_file, ", join %d [%d]",
4566 join_bb->index,
4567 BB_HEAD (join_bb) ? (int)INSN_UID (BB_HEAD (join_bb)) : -1);
4568
4569 if (ce_info->num_multiple_test_blocks > 0)
4570 fprintf (dump_file, ", %d %s block%s last test %d [%d]",
4571 ce_info->num_multiple_test_blocks,
4572 (ce_info->and_and_p) ? "&&" : "||",
4573 (ce_info->num_multiple_test_blocks == 1) ? "" : "s",
4574 ce_info->last_test_bb->index,
4575 ((BB_HEAD (ce_info->last_test_bb))
4576 ? (int)INSN_UID (BB_HEAD (ce_info->last_test_bb))
4577 : -1));
4578
4579 fputc ('\n', dump_file);
4580 }
4581
4582 /* Make sure the IF, THEN, and ELSE blocks are adjacent. Actually, we get the
4583 first condition for free, since we've already asserted that there's a
4584 fallthru edge from IF to THEN. Likewise for the && and || blocks, since
4585 we checked the FALLTHRU flag, those are already adjacent to the last IF
4586 block. */
4587 /* ??? As an enhancement, move the ELSE block. Have to deal with
4588 BLOCK notes, if by no other means than backing out the merge if they
4589 exist. Sticky enough I don't want to think about it now. */
4590 next = then_bb;
4591 if (else_bb && (next = next->next_bb) != else_bb)
4592 return FALSE;
4593 if ((next = next->next_bb) != join_bb
4594 && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4595 {
4596 if (else_bb)
4597 join_bb = NULL;
4598 else
4599 return FALSE;
4600 }
4601
4602 /* Do the real work. */
4603
4604 ce_info->else_bb = else_bb;
4605 ce_info->join_bb = join_bb;
4606
4607 /* If we have && and || tests, try to first handle combining the && and ||
4608 tests into the conditional code, and if that fails, go back and handle
4609 it without the && and ||, which at present handles the && case if there
4610 was no ELSE block. */
4611 if (cond_exec_process_if_block (ce_info, TRUE))
4612 return TRUE;
4613
4614 if (ce_info->num_multiple_test_blocks)
4615 {
4616 cancel_changes (0);
4617
4618 if (cond_exec_process_if_block (ce_info, FALSE))
4619 return TRUE;
4620 }
4621
4622 return FALSE;
4623 }
4624
4625 /* Convert a branch over a trap, or a branch
4626 to a trap, into a conditional trap. */
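/* An illustrative sketch (assumed fragment): the branch over a trap

	if (x != 0) goto over;
	trap;
      over:

   becomes the single conditional trap

	trap_if (x == 0);

   with the comparison reversed below because the THEN (fallthrough)
   block holds the trap. */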
4627
4628 static int
4629 find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge)
4630 {
4631 basic_block then_bb = then_edge->dest;
4632 basic_block else_bb = else_edge->dest;
4633 basic_block other_bb, trap_bb;
4634 rtx_insn *trap, *jump;
4635 rtx cond;
4636 rtx_insn *cond_earliest;
4637 enum rtx_code code;
4638
4639 /* Locate the block with the trap instruction. */
4640 /* ??? While we look for no successors, we really ought to allow
4641 EH successors. Need to fix merge_if_block for that to work. */
4642 if ((trap = block_has_only_trap (then_bb)) != NULL)
4643 trap_bb = then_bb, other_bb = else_bb;
4644 else if ((trap = block_has_only_trap (else_bb)) != NULL)
4645 trap_bb = else_bb, other_bb = then_bb;
4646 else
4647 return FALSE;
4648
4649 if (dump_file)
4650 {
4651 fprintf (dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
4652 test_bb->index, trap_bb->index);
4653 }
4654
4655 /* If this is not a standard conditional jump, we can't parse it. */
4656 jump = BB_END (test_bb);
4657 cond = noce_get_condition (jump, &cond_earliest, false);
4658 if (! cond)
4659 return FALSE;
4660
4661 /* If the conditional jump is more than just a conditional jump, then
4662 we cannot do if-conversion on this block. Give up for returnjump_p:
4663 changing a conditional return followed by an unconditional trap into
4664 a conditional trap followed by an unconditional return is unlikely
4665 to be beneficial and is harder to handle. */
4666 if (! onlyjump_p (jump) || returnjump_p (jump))
4667 return FALSE;
4668
4669 /* We must be comparing objects whose modes imply the size. */
4670 if (GET_MODE (XEXP (cond, 0)) == BLKmode)
4671 return FALSE;
4672
4673 /* Reverse the comparison code, if necessary. */
4674 code = GET_CODE (cond);
4675 if (then_bb == trap_bb)
4676 {
4677 code = reversed_comparison_code (cond, jump);
4678 if (code == UNKNOWN)
4679 return FALSE;
4680 }
4681
4682 /* Attempt to generate the conditional trap. */
4683 rtx_insn *seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
4684 copy_rtx (XEXP (cond, 1)),
4685 TRAP_CODE (PATTERN (trap)));
4686 if (seq == NULL)
4687 return FALSE;
4688
4689 /* Emit the new insns before cond_earliest. */
4690 emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATION (trap));
4691
4692 /* Delete the trap block if possible. */
4693 remove_edge (trap_bb == then_bb ? then_edge : else_edge);
4694 df_set_bb_dirty (test_bb);
4695 df_set_bb_dirty (then_bb);
4696 df_set_bb_dirty (else_bb);
4697
4698 if (EDGE_COUNT (trap_bb->preds) == 0)
4699 {
4700 delete_basic_block (trap_bb);
4701 num_true_changes++;
4702 }
4703
4704 /* Wire together the blocks again. */
4705 if (current_ir_type () == IR_RTL_CFGLAYOUT)
4706 single_succ_edge (test_bb)->flags |= EDGE_FALLTHRU;
4707 else if (trap_bb == then_bb)
4708 {
4709 rtx lab = JUMP_LABEL (jump);
4710 rtx_insn *seq = targetm.gen_jump (lab);
4711 rtx_jump_insn *newjump = emit_jump_insn_after (seq, jump);
4712 LABEL_NUSES (lab) += 1;
4713 JUMP_LABEL (newjump) = lab;
4714 emit_barrier_after (newjump);
4715 }
4716 delete_insn (jump);
4717
4718 if (can_merge_blocks_p (test_bb, other_bb))
4719 {
4720 merge_blocks (test_bb, other_bb);
4721 num_true_changes++;
4722 }
4723
4724 num_updated_if_blocks++;
4725 return TRUE;
4726 }
4727
4728 /* Subroutine of find_cond_trap: if BB contains only a trap insn,
4729 return it. */
4730
4731 static rtx_insn *
4732 block_has_only_trap (basic_block bb)
4733 {
4734 rtx_insn *trap;
4735
4736 /* We're not the exit block. */
4737 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4738 return NULL;
4739
4740 /* The block must have no successors. */
4741 if (EDGE_COUNT (bb->succs) > 0)
4742 return NULL;
4743
4744 /* The only instruction in the block must be the trap. */
4745 trap = first_active_insn (bb);
4746 if (! (trap == BB_END (bb)
4747 && GET_CODE (PATTERN (trap)) == TRAP_IF
4748 && TRAP_CONDITION (PATTERN (trap)) == const_true_rtx))
4749 return NULL;
4750
4751 return trap;
4752 }
4753
4754 /* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is
4755 transformable, but not necessarily the other. There need be no
4756 JOIN block.
4757
4758 Return TRUE if we were successful at converting the block.
4759
4760 Cases we'd like to look at:
4761
4762 (1)
4763 if (test) goto over; // x not live
4764 x = a;
4765 goto label;
4766 over:
4767
4768 becomes
4769
4770 x = a;
4771 if (! test) goto label;
4772
4773 (2)
4774 if (test) goto E; // x not live
4775 x = big();
4776 goto L;
4777 E:
4778 x = b;
4779 goto M;
4780
4781 becomes
4782
4783 x = b;
4784 if (test) goto M;
4785 x = big();
4786 goto L;
4787
4788 (3) // This one's really only interesting for targets that can do
4789 // multiway branching, e.g. IA-64 BBB bundles. For other targets
4790 // it results in multiple branches on a cache line, which often
4791 // does not sit well with predictors.
4792
4793 if (test1) goto E; // predicted not taken
4794 x = a;
4795 if (test2) goto F;
4796 ...
4797 E:
4798 x = b;
4799 J:
4800
4801 becomes
4802
4803 x = a;
4804 if (test1) goto E;
4805 if (test2) goto F;
4806
4807 Notes:
4808
4809 (A) Don't do (2) if the branch is predicted against the block we're
4810 eliminating. Do it anyway if we can eliminate a branch; this requires
4811 that the sole successor of the eliminated block postdominate the other
4812 side of the if.
4813
4814 (B) With CE, on (3) we can steal from both sides of the if, creating
4815
4816 if (test1) x = a;
4817 if (!test1) x = b;
4818 if (test1) goto J;
4819 if (test2) goto F;
4820 ...
4821 J:
4822
4823 Again, this is most useful if J postdominates.
4824
4825 (C) CE substitutes for helpful life information.
4826
4827 (D) These heuristics need a lot of work. */
4828
4829 /* Tests for case 1 above. */
4830
4831 static int
4832 find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
4833 {
4834 basic_block then_bb = then_edge->dest;
4835 basic_block else_bb = else_edge->dest;
4836 basic_block new_bb;
4837 int then_bb_index, then_prob;
4838 rtx else_target = NULL_RTX;
4839
4840 /* If we are partitioning hot/cold basic blocks, we don't want to
4841 mess up unconditional or indirect jumps that cross between hot
4842 and cold sections.
4843
4844 Basic block partitioning may result in some jumps that appear to
4845 be optimizable (or blocks that appear to be mergeable), but which really
4846 must be left untouched (they are required to make it safely across
4847 partition boundaries). See the comments at the top of
4848 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4849
4850 if ((BB_END (then_bb)
4851 && JUMP_P (BB_END (then_bb))
4852 && CROSSING_JUMP_P (BB_END (then_bb)))
4853 || (BB_END (test_bb)
4854 && JUMP_P (BB_END (test_bb))
4855 && CROSSING_JUMP_P (BB_END (test_bb)))
4856 || (BB_END (else_bb)
4857 && JUMP_P (BB_END (else_bb))
4858 && CROSSING_JUMP_P (BB_END (else_bb))))
4859 return FALSE;
4860
4861 /* THEN has one successor. */
4862 if (!single_succ_p (then_bb))
4863 return FALSE;
4864
4865 /* THEN does not fall through, but is not strange either. */
4866 if (single_succ_edge (then_bb)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
4867 return FALSE;
4868
4869 /* THEN has one predecessor. */
4870 if (!single_pred_p (then_bb))
4871 return FALSE;
4872
4873 /* THEN must do something. */
4874 if (forwarder_block_p (then_bb))
4875 return FALSE;
4876
4877 num_possible_if_blocks++;
4878 if (dump_file)
4879 fprintf (dump_file,
4880 "\nIF-CASE-1 found, start %d, then %d\n",
4881 test_bb->index, then_bb->index);
4882
4883 if (then_edge->probability)
4884 then_prob = REG_BR_PROB_BASE - then_edge->probability;
4885 else
4886 then_prob = REG_BR_PROB_BASE / 2;
4887
4888 /* We're speculating from the THEN path, so we want to make sure the cost
4889 of speculation is within reason. */
4890 if (! cheap_bb_rtx_cost_p (then_bb, then_prob,
4891 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (then_edge->src),
4892 predictable_edge_p (then_edge)))))
4893 return FALSE;
4894
4895 if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4896 {
4897 rtx_insn *jump = BB_END (else_edge->src);
4898 gcc_assert (JUMP_P (jump));
4899 else_target = JUMP_LABEL (jump);
4900 }
4901
4902 /* Registers set are dead, or are predicable. */
4903 if (! dead_or_predicable (test_bb, then_bb, else_bb,
4904 single_succ_edge (then_bb), 1))
4905 return FALSE;
4906
4907 /* Conversion went ok, including moving the insns and fixing up the
4908 jump. Adjust the CFG to match. */
4909
4910 /* We can avoid creating a new basic block if then_bb is immediately
4911 followed by else_bb, i.e. deleting then_bb allows test_bb to fall
4912 through to else_bb. */
4913
4914 if (then_bb->next_bb == else_bb
4915 && then_bb->prev_bb == test_bb
4916 && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4917 {
4918 redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
4919 new_bb = 0;
4920 }
4921 else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4922 new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
4923 else_bb, else_target);
4924 else
4925 new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
4926 else_bb);
4927
4928 df_set_bb_dirty (test_bb);
4929 df_set_bb_dirty (else_bb);
4930
4931 then_bb_index = then_bb->index;
4932 delete_basic_block (then_bb);
4933
4934 /* Make rest of code believe that the newly created block is the THEN_BB
4935 block we removed. */
4936 if (new_bb)
4937 {
4938 df_bb_replace (then_bb_index, new_bb);
4939 /* This should have been done above via force_nonfallthru_and_redirect
4940 (possibly called from redirect_edge_and_branch_force). */
4941 gcc_checking_assert (BB_PARTITION (new_bb) == BB_PARTITION (test_bb));
4942 }
4943
4944 num_true_changes++;
4945 num_updated_if_blocks++;
4946 return TRUE;
4947 }
4948
4949 /* Test for case 2 above. */
4950
4951 static int
4952 find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
4953 {
4954 basic_block then_bb = then_edge->dest;
4955 basic_block else_bb = else_edge->dest;
4956 edge else_succ;
4957 int then_prob, else_prob;
4958
4959 /* We do not want to speculate (empty) loop latches. */
4960 if (current_loops
4961 && else_bb->loop_father->latch == else_bb)
4962 return FALSE;
4963
4964 /* If we are partitioning hot/cold basic blocks, we don't want to
4965 mess up unconditional or indirect jumps that cross between hot
4966 and cold sections.
4967
4968 Basic block partitioning may result in some jumps that appear to
4969 be optimizable (or blocks that appear to be mergeable), but which really
4970 must be left untouched (they are required to make it safely across
4971 partition boundaries). See the comments at the top of
4972 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4973
4974 if ((BB_END (then_bb)
4975 && JUMP_P (BB_END (then_bb))
4976 && CROSSING_JUMP_P (BB_END (then_bb)))
4977 || (BB_END (test_bb)
4978 && JUMP_P (BB_END (test_bb))
4979 && CROSSING_JUMP_P (BB_END (test_bb)))
4980 || (BB_END (else_bb)
4981 && JUMP_P (BB_END (else_bb))
4982 && CROSSING_JUMP_P (BB_END (else_bb))))
4983 return FALSE;
4984
4985 /* ELSE has one successor. */
4986 if (!single_succ_p (else_bb))
4987 return FALSE;
4988 else
4989 else_succ = single_succ_edge (else_bb);
4990
4991 /* ELSE outgoing edge is not complex. */
4992 if (else_succ->flags & EDGE_COMPLEX)
4993 return FALSE;
4994
4995 /* ELSE has one predecessor. */
4996 if (!single_pred_p (else_bb))
4997 return FALSE;
4998
4999 /* THEN is not EXIT. */
5000 if (then_bb->index < NUM_FIXED_BLOCKS)
5001 return FALSE;
5002
5003 if (else_edge->probability)
5004 {
5005 else_prob = else_edge->probability;
5006 then_prob = REG_BR_PROB_BASE - else_prob;
5007 }
5008 else
5009 {
5010 else_prob = REG_BR_PROB_BASE / 2;
5011 then_prob = REG_BR_PROB_BASE / 2;
5012 }
5013
5014 /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
5015 if (else_prob > then_prob)
5016 ;
5017 else if (else_succ->dest->index < NUM_FIXED_BLOCKS
5018 || dominated_by_p (CDI_POST_DOMINATORS, then_bb,
5019 else_succ->dest))
5020 ;
5021 else
5022 return FALSE;
5023
5024 num_possible_if_blocks++;
5025 if (dump_file)
5026 fprintf (dump_file,
5027 "\nIF-CASE-2 found, start %d, else %d\n",
5028 test_bb->index, else_bb->index);
5029
5030 /* We're speculating from the ELSE path, so we want to make sure the cost
5031 of speculation is within reason. */
5032 if (! cheap_bb_rtx_cost_p (else_bb, else_prob,
5033 COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (else_edge->src),
5034 predictable_edge_p (else_edge)))))
5035 return FALSE;
5036
5037 /* Registers set are dead, or are predicable. */
5038 if (! dead_or_predicable (test_bb, else_bb, then_bb, else_succ, 0))
5039 return FALSE;
5040
5041 /* Conversion went ok, including moving the insns and fixing up the
5042 jump. Adjust the CFG to match. */
5043
5044 df_set_bb_dirty (test_bb);
5045 df_set_bb_dirty (then_bb);
5046 delete_basic_block (else_bb);
5047
5048 num_true_changes++;
5049 num_updated_if_blocks++;
5050
5051 /* ??? We may now fallthru from one of THEN's successors into a join
5052 block. Rerun cleanup_cfg? Examine things manually? Wait? */
5053
5054 return TRUE;
5055 }
5056
5057 /* Used by the code above to perform the actual rtl transformations.
5058 Return TRUE if successful.
5059
5060 TEST_BB is the block containing the conditional branch. MERGE_BB
5061 is the block containing the code to manipulate. DEST_EDGE is an
5062 edge representing a jump to the join block; after the conversion,
5063 TEST_BB should be branching to its destination.
5064 REVERSEP is true if the sense of the branch should be reversed. */
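/* For instance (an illustrative sketch), applied to case 1 above with

	test_bb:   if (test) goto over;
	merge_bb:  x = a;
		   goto label;
	over:

   and x dead at OVER, the body of MERGE_BB is moved up before the
   branch and the jump is rewritten, yielding

	x = a;
	if (! test) goto label;  */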
5065
5066 static int
5067 dead_or_predicable (basic_block test_bb, basic_block merge_bb,
5068 basic_block other_bb, edge dest_edge, int reversep)
5069 {
5070 basic_block new_dest = dest_edge->dest;
5071 rtx_insn *head, *end, *jump;
5072 rtx_insn *earliest = NULL;
5073 rtx old_dest;
5074 bitmap merge_set = NULL;
5075 /* Number of pending changes. */
5076 int n_validated_changes = 0;
5077 rtx new_dest_label = NULL_RTX;
5078
5079 jump = BB_END (test_bb);
5080
5081 /* Find the extent of the real code in the merge block. */
5082 head = BB_HEAD (merge_bb);
5083 end = BB_END (merge_bb);
5084
5085 while (DEBUG_INSN_P (end) && end != head)
5086 end = PREV_INSN (end);
5087
5088 /* If merge_bb ends with a tablejump, predicating/moving insns
5089 into test_bb and then deleting merge_bb will result in the jumptable
5090 that follows merge_bb being removed along with merge_bb, leaving
5091 an unresolved reference to the jumptable. */
5092 if (tablejump_p (end, NULL, NULL))
5093 return FALSE;
5094
5095 if (LABEL_P (head))
5096 head = NEXT_INSN (head);
5097 while (DEBUG_INSN_P (head) && head != end)
5098 head = NEXT_INSN (head);
5099 if (NOTE_P (head))
5100 {
5101 if (head == end)
5102 {
5103 head = end = NULL;
5104 goto no_body;
5105 }
5106 head = NEXT_INSN (head);
5107 while (DEBUG_INSN_P (head) && head != end)
5108 head = NEXT_INSN (head);
5109 }
5110
5111 if (JUMP_P (end))
5112 {
5113 if (!onlyjump_p (end))
5114 return FALSE;
5115 if (head == end)
5116 {
5117 head = end = NULL;
5118 goto no_body;
5119 }
5120 end = PREV_INSN (end);
5121 while (DEBUG_INSN_P (end) && end != head)
5122 end = PREV_INSN (end);
5123 }
5124
5125 /* Don't move frame-related insns across the conditional branch. This
5126 can lead to one of the paths of the branch having wrong unwind info. */
5127 if (epilogue_completed)
5128 {
5129 rtx_insn *insn = head;
5130 while (1)
5131 {
5132 if (INSN_P (insn) && RTX_FRAME_RELATED_P (insn))
5133 return FALSE;
5134 if (insn == end)
5135 break;
5136 insn = NEXT_INSN (insn);
5137 }
5138 }
5139
5140 /* Disable handling dead code by conditional execution if the machine needs
5141 to do anything funny with the tests, etc. */
5142 #ifndef IFCVT_MODIFY_TESTS
5143 if (targetm.have_conditional_execution ())
5144 {
5145 /* In the conditional execution case, we have things easy. We know
5146 the condition is reversible. We don't have to check life info
5147 because we're going to conditionally execute the code anyway.
5148 All that's left is making sure the insns involved can actually
5149 be predicated. */
5150
5151 rtx cond;
5152
5153 cond = cond_exec_get_condition (jump);
5154 if (! cond)
5155 return FALSE;
5156
5157 rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
5158 int prob_val = (note ? XINT (note, 0) : -1);
5159
5160 if (reversep)
5161 {
5162 enum rtx_code rev = reversed_comparison_code (cond, jump);
5163 if (rev == UNKNOWN)
5164 return FALSE;
5165 cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0),
5166 XEXP (cond, 1));
5167 if (prob_val >= 0)
5168 prob_val = REG_BR_PROB_BASE - prob_val;
5169 }
5170
5171 if (cond_exec_process_insns (NULL, head, end, cond, prob_val, 0)
5172 && verify_changes (0))
5173 n_validated_changes = num_validated_changes ();
5174 else
5175 cancel_changes (0);
5176
5177 earliest = jump;
5178 }
5179 #endif
5180
5181 /* If we allocated new pseudos (e.g. in the conditional move
5182 expander called from noce_emit_cmove), we must resize the
5183 array first. */
5184 if (max_regno < max_reg_num ())
5185 max_regno = max_reg_num ();
5186
5187 /* Try the NCE path if the CE path did not result in any changes. */
5188 if (n_validated_changes == 0)
5189 {
5190 rtx cond;
5191 rtx_insn *insn;
5192 regset live;
5193 bool success;
5194
5195 /* In the non-conditional execution case, we have to verify that there
5196 are no trapping operations, no calls, no references to memory, and
5197 that any registers modified are dead at the branch site. */
5198
5199 if (!any_condjump_p (jump))
5200 return FALSE;
5201
5202 /* Find the extent of the conditional. */
5203 cond = noce_get_condition (jump, &earliest, false);
5204 if (!cond)
5205 return FALSE;
5206
5207 live = BITMAP_ALLOC (&reg_obstack);
5208 simulate_backwards_to_point (merge_bb, live, end);
5209 success = can_move_insns_across (head, end, earliest, jump,
5210 merge_bb, live,
5211 df_get_live_in (other_bb), NULL);
5212 BITMAP_FREE (live);
5213 if (!success)
5214 return FALSE;
5215
5216 /* Collect the set of registers set in MERGE_BB. */
5217 merge_set = BITMAP_ALLOC (&reg_obstack);
5218
5219 FOR_BB_INSNS (merge_bb, insn)
5220 if (NONDEBUG_INSN_P (insn))
5221 df_simulate_find_defs (insn, merge_set);
5222
5223 /* If shrink-wrapping, disable this optimization when test_bb is
5224 the first basic block and merge_bb exits. The idea is to not
5225 move code setting up a return register as that may clobber a
5226 register used to pass function parameters, which then must be
5227 saved in caller-saved regs. A caller-saved reg requires the
5228 prologue, killing a shrink-wrap opportunity. */
5229 if ((SHRINK_WRAPPING_ENABLED && !epilogue_completed)
5230 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
5231 && single_succ_p (new_dest)
5232 && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
5233 && bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
5234 {
5235 regset return_regs;
5236 unsigned int i;
5237
5238 return_regs = BITMAP_ALLOC (&reg_obstack);
5239
5240 /* Start off with the intersection of regs used to pass
5241 params and regs used to return values. */
5242 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5243 if (FUNCTION_ARG_REGNO_P (i)
5244 && targetm.calls.function_value_regno_p (i))
5245 bitmap_set_bit (return_regs, INCOMING_REGNO (i));
5246
5247 bitmap_and_into (return_regs,
5248 df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
5249 bitmap_and_into (return_regs,
5250 df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
5251 if (!bitmap_empty_p (return_regs))
5252 {
5253 FOR_BB_INSNS_REVERSE (new_dest, insn)
5254 if (NONDEBUG_INSN_P (insn))
5255 {
5256 df_ref def;
5257
5258 /* If this insn sets any reg in return_regs, add all
5259 reg uses to the set of regs we're interested in. */
5260 FOR_EACH_INSN_DEF (def, insn)
5261 if (bitmap_bit_p (return_regs, DF_REF_REGNO (def)))
5262 {
5263 df_simulate_uses (insn, return_regs);
5264 break;
5265 }
5266 }
5267 if (bitmap_intersect_p (merge_set, return_regs))
5268 {
5269 BITMAP_FREE (return_regs);
5270 BITMAP_FREE (merge_set);
5271 return FALSE;
5272 }
5273 }
5274 BITMAP_FREE (return_regs);
5275 }
5276 }
5277
5278 no_body:
5279 /* We don't want to use normal invert_jump or redirect_jump because
5280 we don't want delete_insn called. Also, we want to do our own
5281 change group management. */
5282
5283 old_dest = JUMP_LABEL (jump);
5284 if (other_bb != new_dest)
5285 {
5286 if (!any_condjump_p (jump))
5287 goto cancel;
5288
5289 if (JUMP_P (BB_END (dest_edge->src)))
5290 new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
5291 else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
5292 new_dest_label = ret_rtx;
5293 else
5294 new_dest_label = block_label (new_dest);
5295
5296 rtx_jump_insn *jump_insn = as_a <rtx_jump_insn *> (jump);
5297 if (reversep
5298 ? ! invert_jump_1 (jump_insn, new_dest_label)
5299 : ! redirect_jump_1 (jump_insn, new_dest_label))
5300 goto cancel;
5301 }
5302
5303 if (verify_changes (n_validated_changes))
5304 confirm_change_group ();
5305 else
5306 goto cancel;
5307
5308 if (other_bb != new_dest)
5309 {
5310 redirect_jump_2 (as_a <rtx_jump_insn *> (jump), old_dest, new_dest_label,
5311 0, reversep);
5312
5313 redirect_edge_succ (BRANCH_EDGE (test_bb), new_dest);
5314 if (reversep)
5315 {
5316 std::swap (BRANCH_EDGE (test_bb)->count,
5317 FALLTHRU_EDGE (test_bb)->count);
5318 std::swap (BRANCH_EDGE (test_bb)->probability,
5319 FALLTHRU_EDGE (test_bb)->probability);
5320 update_br_prob_note (test_bb);
5321 }
5322 }
5323
5324 /* Move the insns out of MERGE_BB to before the branch. */
5325 if (head != NULL)
5326 {
5327 rtx_insn *insn;
5328
5329 if (end == BB_END (merge_bb))
5330 BB_END (merge_bb) = PREV_INSN (head);
5331
5332 /* PR 21767: when moving insns above a conditional branch, the REG_EQUAL
5333 notes being moved might become invalid. */
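      /* For instance (illustrative): a REG_EQUAL note recording x == 5 on
	 an insn in MERGE_BB may state a fact that held only when the branch
	 went this way, so it must not survive the move above the branch. */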
5334 insn = head;
5335 do
5336 {
5337 rtx note;
5338
5339 if (! INSN_P (insn))
5340 continue;
5341 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5342 if (! note)
5343 continue;
5344 remove_note (insn, note);
5345 } while (insn != end && (insn = NEXT_INSN (insn)));
5346
5347 /* PR46315: when moving insns above a conditional branch, the REG_EQUAL
5348 notes referring to the registers being set might become invalid. */
5349 if (merge_set)
5350 {
5351 unsigned i;
5352 bitmap_iterator bi;
5353
5354 EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, bi)
5355 remove_reg_equal_equiv_notes_for_regno (i);
5356
5357 BITMAP_FREE (merge_set);
5358 }
5359
5360 reorder_insns (head, end, PREV_INSN (earliest));
5361 }
5362
5363 /* Remove the jump and edge if we can. */
5364 if (other_bb == new_dest)
5365 {
5366 delete_insn (jump);
5367 remove_edge (BRANCH_EDGE (test_bb));
5368 /* ??? Can't merge blocks here, as then_bb is still in use.
5369 At minimum, the merge will get done just before bb-reorder. */
5370 }
5371
5372 return TRUE;
5373
5374 cancel:
5375 cancel_changes (0);
5376
5377 if (merge_set)
5378 BITMAP_FREE (merge_set);
5379
5380 return FALSE;
5381 }
5382 \f
5383 /* Main entry point for all if-conversion. AFTER_COMBINE is true if
5384 we are after the combine pass. */
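/* A hedged illustration of why repeated passes help: with conditional
   execution, a nested

	if (a) { if (b) x = 1; }

   first has its inner IF-THEN predicated; the outer test then heads a
   simple IF-THEN block that a later pass can convert as well. */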
5385
5386 static void
5387 if_convert (bool after_combine)
5388 {
5389 basic_block bb;
5390 int pass;
5391
5392 if (optimize == 1)
5393 {
5394 df_live_add_problem ();
5395 df_live_set_all_dirty ();
5396 }
5397
5398 /* Record whether we are after the combine pass. */
5399 ifcvt_after_combine = after_combine;
5400 have_cbranchcc4 = (direct_optab_handler (cbranch_optab, CCmode)
5401 != CODE_FOR_nothing);
5402 num_possible_if_blocks = 0;
5403 num_updated_if_blocks = 0;
5404 num_true_changes = 0;
5405
5406 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5407 mark_loop_exit_edges ();
5408 loop_optimizer_finalize ();
5409 free_dominance_info (CDI_DOMINATORS);
5410
5411 /* Compute postdominators. */
5412 calculate_dominance_info (CDI_POST_DOMINATORS);
5413
5414 df_set_flags (DF_LR_RUN_DCE);
5415
5416 /* Go through each of the basic blocks looking for things to convert. If we
5417 have conditional execution, we make multiple passes to allow us to handle
5418 IF-THEN{-ELSE} blocks within other IF-THEN{-ELSE} blocks. */
5419 pass = 0;
5420 do
5421 {
5422 df_analyze ();
5423 /* Only need to do dce on the first pass. */
5424 df_clear_flags (DF_LR_RUN_DCE);
5425 cond_exec_changed_p = FALSE;
5426 pass++;
5427
5428 #ifdef IFCVT_MULTIPLE_DUMPS
5429 if (dump_file && pass > 1)
5430 fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
5431 #endif
5432
5433 FOR_EACH_BB_FN (bb, cfun)
5434 {
5435 basic_block new_bb;
5436 while (!df_get_bb_dirty (bb)
5437 && (new_bb = find_if_header (bb, pass)) != NULL)
5438 bb = new_bb;
5439 }
5440
5441 #ifdef IFCVT_MULTIPLE_DUMPS
5442 if (dump_file && cond_exec_changed_p)
5443 print_rtl_with_bb (dump_file, get_insns (), dump_flags);
5444 #endif
5445 }
5446 while (cond_exec_changed_p);
5447
5448 #ifdef IFCVT_MULTIPLE_DUMPS
5449 if (dump_file)
5450 fprintf (dump_file, "\n\n========== no more changes\n");
5451 #endif
5452
5453 free_dominance_info (CDI_POST_DOMINATORS);
5454
5455 if (dump_file)
5456 fflush (dump_file);
5457
5458 clear_aux_for_blocks ();
5459
5460 /* If we allocated new pseudos, we must resize the array for sched1. */
5461 if (max_regno < max_reg_num ())
5462 max_regno = max_reg_num ();
5463
5464 /* Write the final stats. */
5465 if (dump_file && num_possible_if_blocks > 0)
5466 {
5467 fprintf (dump_file,
5468 "\n%d possible IF blocks searched.\n",
5469 num_possible_if_blocks);
5470 fprintf (dump_file,
5471 "%d IF blocks converted.\n",
5472 num_updated_if_blocks);
5473 fprintf (dump_file,
5474 "%d true changes made.\n\n\n",
5475 num_true_changes);
5476 }
5477
5478 if (optimize == 1)
5479 df_remove_problem (df_live);
5480
5481 checking_verify_flow_info ();
5482 }
5483 \f
5484 /* If-conversion and CFG cleanup. */
5485 static unsigned int
5486 rest_of_handle_if_conversion (void)
5487 {
5488 if (flag_if_conversion)
5489 {
5490 if (dump_file)
5491 {
5492 dump_reg_info (dump_file);
5493 dump_flow_info (dump_file, dump_flags);
5494 }
5495 cleanup_cfg (CLEANUP_EXPENSIVE);
5496 if_convert (false);
5497 }
5498
5499 cleanup_cfg (0);
5500 return 0;
5501 }
5502
5503 namespace {
5504
5505 const pass_data pass_data_rtl_ifcvt =
5506 {
5507 RTL_PASS, /* type */
5508 "ce1", /* name */
5509 OPTGROUP_NONE, /* optinfo_flags */
5510 TV_IFCVT, /* tv_id */
5511 0, /* properties_required */
5512 0, /* properties_provided */
5513 0, /* properties_destroyed */
5514 0, /* todo_flags_start */
5515 TODO_df_finish, /* todo_flags_finish */
5516 };
5517
5518 class pass_rtl_ifcvt : public rtl_opt_pass
5519 {
5520 public:
5521 pass_rtl_ifcvt (gcc::context *ctxt)
5522 : rtl_opt_pass (pass_data_rtl_ifcvt, ctxt)
5523 {}
5524
5525 /* opt_pass methods: */
5526 virtual bool gate (function *)
5527 {
5528 return (optimize > 0) && dbg_cnt (if_conversion);
5529 }
5530
5531 virtual unsigned int execute (function *)
5532 {
5533 return rest_of_handle_if_conversion ();
5534 }
5535
5536 }; // class pass_rtl_ifcvt
5537
5538 } // anon namespace
5539
5540 rtl_opt_pass *
5541 make_pass_rtl_ifcvt (gcc::context *ctxt)
5542 {
5543 return new pass_rtl_ifcvt (ctxt);
5544 }
5545
5546
5547 /* Rerun if-conversion, as combine may have simplified things enough
5548 to now meet sequence length restrictions. */
5549
5550 namespace {
5551
5552 const pass_data pass_data_if_after_combine =
5553 {
5554 RTL_PASS, /* type */
5555 "ce2", /* name */
5556 OPTGROUP_NONE, /* optinfo_flags */
5557 TV_IFCVT, /* tv_id */
5558 0, /* properties_required */
5559 0, /* properties_provided */
5560 0, /* properties_destroyed */
5561 0, /* todo_flags_start */
5562 TODO_df_finish, /* todo_flags_finish */
5563 };
5564
5565 class pass_if_after_combine : public rtl_opt_pass
5566 {
5567 public:
5568 pass_if_after_combine (gcc::context *ctxt)
5569 : rtl_opt_pass (pass_data_if_after_combine, ctxt)
5570 {}
5571
5572 /* opt_pass methods: */
5573 virtual bool gate (function *)
5574 {
5575 return optimize > 0 && flag_if_conversion
5576 && dbg_cnt (if_after_combine);
5577 }
5578
5579 virtual unsigned int execute (function *)
5580 {
5581 if_convert (true);
5582 return 0;
5583 }
5584
5585 }; // class pass_if_after_combine
5586
5587 } // anon namespace
5588
5589 rtl_opt_pass *
5590 make_pass_if_after_combine (gcc::context *ctxt)
5591 {
5592 return new pass_if_after_combine (ctxt);
5593 }
5594
5595
5596 namespace {
5597
5598 const pass_data pass_data_if_after_reload =
5599 {
5600 RTL_PASS, /* type */
5601 "ce3", /* name */
5602 OPTGROUP_NONE, /* optinfo_flags */
5603 TV_IFCVT2, /* tv_id */
5604 0, /* properties_required */
5605 0, /* properties_provided */
5606 0, /* properties_destroyed */
5607 0, /* todo_flags_start */
5608 TODO_df_finish, /* todo_flags_finish */
5609 };
5610
5611 class pass_if_after_reload : public rtl_opt_pass
5612 {
5613 public:
5614 pass_if_after_reload (gcc::context *ctxt)
5615 : rtl_opt_pass (pass_data_if_after_reload, ctxt)
5616 {}
5617
5618 /* opt_pass methods: */
5619 virtual bool gate (function *)
5620 {
5621 return optimize > 0 && flag_if_conversion2
5622 && dbg_cnt (if_after_reload);
5623 }
5624
5625 virtual unsigned int execute (function *)
5626 {
5627 if_convert (true);
5628 return 0;
5629 }
5630
5631 }; // class pass_if_after_reload
5632
5633 } // anon namespace
5634
5635 rtl_opt_pass *
5636 make_pass_if_after_reload (gcc::context *ctxt)
5637 {
5638 return new pass_if_after_reload (ctxt);
5639 }