1 /* Swing Modulo Scheduling implementation.
2 Copyright (C) 2004, 2005, 2006
3 Free Software Foundation, Inc.
4 Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "sched-int.h"
41 #include "target.h"
42 #include "cfglayout.h"
43 #include "cfgloop.h"
44 #include "cfghooks.h"
45 #include "expr.h"
46 #include "params.h"
47 #include "gcov-io.h"
48 #include "df.h"
49 #include "ddg.h"
50 #include "timevar.h"
51 #include "tree-pass.h"
52
53 #ifdef INSN_SCHEDULING
54
55 /* This file contains the implementation of the Swing Modulo Scheduler,
56 described in the following references:
57 [1] J. Llosa, A. Gonzalez, E. Ayguade, M. Valero., and J. Eckhardt.
58 Lifetime-sensitive modulo scheduling in a production environment.
59 IEEE Trans. on Comps., 50(3), March 2001
60 [2] J. Llosa, A. Gonzalez, E. Ayguade, and M. Valero.
61 Swing Modulo Scheduling: A Lifetime Sensitive Approach.
62 PACT '96 , pages 80-87, October 1996 (Boston - Massachusetts - USA).
63
64 The basic structure is:
65 1. Build a data-dependence graph (DDG) for each loop.
66 2. Use the DDG to order the insns of a loop (not necessarily in
67 topological order), trying to place each insn after all its
68 predecessors _or_ after all its successors.
69 3. Compute MII: a lower bound on the number of cycles to schedule the loop.
70 4. Use the ordering to perform list-scheduling of the loop:
71 1. Set II = MII. We will try to schedule the loop within II cycles.
72 2. Try to schedule the insns one by one according to the ordering.
73 For each insn compute an interval of cycles by considering already-
74 scheduled preds and succs (and associated latencies); try to place
75 the insn in the cycles of this window checking for potential
76 resource conflicts (using the DFA interface).
77 Note: this is different from the cycle-scheduling of schedule_insns;
78 here the insns are not scheduled monotonically top-down (nor bottom-
79 up).
80 3. If we fail to schedule all the insns, bump II and try again, unless
81 II reaches an upper bound MaxII, in which case we report failure.
82 5. If we succeeded in scheduling the loop within II cycles, we now
83 generate prolog and epilog, decrease the counter of the loop, and
84 perform modulo variable expansion for live ranges that span more than
85 II cycles (i.e. use register copies to prevent a def from overwriting
86 itself before reaching the use).
87 */
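/* As a schematic illustration (not drawn from any particular target),
   consider a loop body of insns A, B and C with dependences A -> B -> C,
   scheduled with II = 1 and spread over three stages.  Four iterations
   then overlap as follows:

       prolog:  A1
                A2 B1
       kernel:  A3 B2 C1
                A4 B3 C2     <- one row of the kernel per II cycles
       epilog:     B4 C3
                         C4

   i.e. each kernel cycle executes stage 0 of iteration i together with
   stage 1 of iteration i-1 and stage 2 of iteration i-2.  */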
88
89 \f
90 /* This page defines partial-schedule structures and functions for
91 modulo scheduling. */
92
93 typedef struct partial_schedule *partial_schedule_ptr;
94 typedef struct ps_insn *ps_insn_ptr;
95
96 /* The minimum (absolute) cycle that a node of ps was scheduled in. */
97 #define PS_MIN_CYCLE(ps) (((partial_schedule_ptr)(ps))->min_cycle)
98
99 /* The maximum (absolute) cycle that a node of ps was scheduled in. */
100 #define PS_MAX_CYCLE(ps) (((partial_schedule_ptr)(ps))->max_cycle)
101
102 /* Perform signed modulo, always returning a non-negative value. */
103 #define SMODULO(x,y) ((x) % (y) < 0 ? ((x) % (y) + (y)) : (x) % (y))
104
105 /* The number of different iterations the nodes in ps span, assuming
106 the stage boundaries are placed efficiently. */
107 #define PS_STAGE_COUNT(ps) ((PS_MAX_CYCLE (ps) - PS_MIN_CYCLE (ps) \
108 + 1 + (ps)->ii - 1) / (ps)->ii)
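/* For illustration (values invented): on targets where % truncates
   towards zero, -7 % 3 is -1, so SMODULO (-7, 3) yields -1 + 3 = 2.
   And for a ps with ii = 3, min_cycle = 2 and max_cycle = 9,
   PS_STAGE_COUNT is (9 - 2 + 1 + 3 - 1) / 3 = 10 / 3 = 3, i.e. the
   ceiling of the cycle span divided by ii.  */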
109
110 /* A single instruction in the partial schedule. */
111 struct ps_insn
112 {
113 /* The corresponding DDG_NODE. */
114 ddg_node_ptr node;
115
116 /* The (absolute) cycle in which the PS instruction is scheduled.
117 Same as SCHED_TIME (node). */
118 int cycle;
119
120 /* The next/prev PS_INSN in the same row. */
121 ps_insn_ptr next_in_row,
122 prev_in_row;
123
124 /* The number of nodes in the same row that come after this node. */
125 int row_rest_count;
126 };
127
128 /* Holds the partial schedule as an array of II rows. Each entry of the
129 array points to a linked list of PS_INSNs, which represents the
130 instructions that are scheduled for that row. */
131 struct partial_schedule
132 {
133 int ii; /* Number of rows in the partial schedule. */
134 int history; /* Threshold for conflict checking using DFA. */
135
136 /* rows[i] points to linked list of insns scheduled in row i (0<=i<ii). */
137 ps_insn_ptr *rows;
138
139 /* The earliest absolute cycle of an insn in the partial schedule. */
140 int min_cycle;
141
142 /* The latest absolute cycle of an insn in the partial schedule. */
143 int max_cycle;
144
145 ddg_ptr g; /* The DDG of the insns in the partial schedule. */
146 };
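/* A sketch of the layout for a hypothetical partial schedule with ii = 2
   holding insns a, b, c, d scheduled at cycles 0, 1, 2, 3 respectively:

       rows[0]: a -> c      (cycles 0 and 2, both == 0 mod ii)
       rows[1]: b -> d      (cycles 1 and 3)

   with min_cycle = 0 and max_cycle = 3, giving PS_STAGE_COUNT = 2.  */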
147
148 /* We use this to record all the register replacements we do in
149 the kernel so we can undo SMS if it is not profitable. */
150 struct undo_replace_buff_elem
151 {
152 rtx insn;
153 rtx orig_reg;
154 rtx new_reg;
155 struct undo_replace_buff_elem *next;
156 };
157
158
159
160 static partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr, int history);
161 static void free_partial_schedule (partial_schedule_ptr);
162 static void reset_partial_schedule (partial_schedule_ptr, int new_ii);
163 void print_partial_schedule (partial_schedule_ptr, FILE *);
164 static int kernel_number_of_cycles (rtx first_insn, rtx last_insn);
165 static ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr,
166 ddg_node_ptr node, int cycle,
167 sbitmap must_precede,
168 sbitmap must_follow);
169 static void rotate_partial_schedule (partial_schedule_ptr, int);
170 void set_row_column_for_ps (partial_schedule_ptr);
171 static bool ps_unschedule_node (partial_schedule_ptr, ddg_node_ptr);
172
173 \f
174 /* This page defines constants and structures for the modulo scheduling
175 driver. */
176
177 /* As in haifa-sched.c: */
178 /* issue_rate is the number of insns that can be scheduled in the same
179 machine cycle. It can be defined in the config/mach/mach.h file,
180 otherwise we set it to 1. */
181
182 static int issue_rate;
183
184 static int sms_order_nodes (ddg_ptr, int, int * result);
185 static void set_node_sched_params (ddg_ptr);
186 static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *);
187 static void permute_partial_schedule (partial_schedule_ptr ps, rtx last);
188 static void generate_prolog_epilog (partial_schedule_ptr, struct loop *loop, rtx);
189 static void duplicate_insns_of_cycles (partial_schedule_ptr ps,
190 int from_stage, int to_stage,
191 int is_prolog);
192
193 #define SCHED_ASAP(x) (((node_sched_params_ptr)(x)->aux.info)->asap)
194 #define SCHED_TIME(x) (((node_sched_params_ptr)(x)->aux.info)->time)
195 #define SCHED_FIRST_REG_MOVE(x) \
196 (((node_sched_params_ptr)(x)->aux.info)->first_reg_move)
197 #define SCHED_NREG_MOVES(x) \
198 (((node_sched_params_ptr)(x)->aux.info)->nreg_moves)
199 #define SCHED_ROW(x) (((node_sched_params_ptr)(x)->aux.info)->row)
200 #define SCHED_STAGE(x) (((node_sched_params_ptr)(x)->aux.info)->stage)
201 #define SCHED_COLUMN(x) (((node_sched_params_ptr)(x)->aux.info)->column)
202
203 /* The scheduling parameters held for each node. */
204 typedef struct node_sched_params
205 {
206 int asap; /* A lower-bound on the absolute scheduling cycle. */
207 int time; /* The absolute scheduling cycle (time >= asap). */
208
209 /* The following field (first_reg_move) is a pointer to the first
210 register-move instruction added to handle the modulo-variable-expansion
211 of the register defined by this node. This register-move copies the
212 original register defined by the node. */
213 rtx first_reg_move;
214
215 /* The number of register-move instructions added, immediately preceding
216 first_reg_move. */
217 int nreg_moves;
218
219 int row; /* Holds time % ii. */
220 int stage; /* Holds time / ii. */
221
222 /* The column of a node inside the ps. If nodes u, v are on the same row,
223 u will precede v if column (u) < column (v). */
224 int column;
225 } *node_sched_params_ptr;
226
227 \f
228 /* The following two functions are copied from the current scheduler
229 code in order to use sched_analyze() for computing the dependencies.
230 They are used when initializing the sched_info structure. */
231 static const char *
232 sms_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
233 {
234 static char tmp[80];
235
236 sprintf (tmp, "i%4d", INSN_UID (insn));
237 return tmp;
238 }
239
240 static void
241 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
242 regset cond_exec ATTRIBUTE_UNUSED,
243 regset used ATTRIBUTE_UNUSED,
244 regset set ATTRIBUTE_UNUSED)
245 {
246 }
247
248 static struct sched_info sms_sched_info =
249 {
250 NULL,
251 NULL,
252 NULL,
253 NULL,
254 NULL,
255 sms_print_insn,
256 NULL,
257 compute_jump_reg_dependencies,
258 NULL, NULL,
259 NULL, NULL,
260 0, 0, 0,
261
262 NULL, NULL, NULL, NULL, NULL,
263 #ifdef ENABLE_CHECKING
264 NULL,
265 #endif
266 0
267 };
268
269
270 /* Return the register decremented and tested in INSN,
271 or zero if it is not a decrement-and-branch insn. */
272
273 static rtx
274 doloop_register_get (rtx insn ATTRIBUTE_UNUSED)
275 {
276 #ifdef HAVE_doloop_end
277 rtx pattern, reg, condition;
278
279 if (! JUMP_P (insn))
280 return NULL_RTX;
281
282 pattern = PATTERN (insn);
283 condition = doloop_condition_get (pattern);
284 if (! condition)
285 return NULL_RTX;
286
287 if (REG_P (XEXP (condition, 0)))
288 reg = XEXP (condition, 0);
289 else if (GET_CODE (XEXP (condition, 0)) == PLUS
290 && REG_P (XEXP (XEXP (condition, 0), 0)))
291 reg = XEXP (XEXP (condition, 0), 0);
292 else
293 gcc_unreachable ();
294
295 return reg;
296 #else
297 return NULL_RTX;
298 #endif
299 }
300
301 /* Check if COUNT_REG is set to a constant in the PRE_HEADER block, so
302 that the number of iterations is a compile-time constant. If so,
303 return the rtx that sets COUNT_REG to a constant, and set COUNT to
304 this constant. Otherwise return 0. */
305 static rtx
306 const_iteration_count (rtx count_reg, basic_block pre_header,
307 HOST_WIDEST_INT * count)
308 {
309 rtx insn;
310 rtx head, tail;
311
312 if (! pre_header)
313 return NULL_RTX;
314
315 get_ebb_head_tail (pre_header, pre_header, &head, &tail);
316
317 for (insn = tail; insn != PREV_INSN (head); insn = PREV_INSN (insn))
318 if (INSN_P (insn) && single_set (insn) &&
319 rtx_equal_p (count_reg, SET_DEST (single_set (insn))))
320 {
321 rtx pat = single_set (insn);
322
323 if (GET_CODE (SET_SRC (pat)) == CONST_INT)
324 {
325 *count = INTVAL (SET_SRC (pat));
326 return insn;
327 }
328
329 return NULL_RTX;
330 }
331
332 return NULL_RTX;
333 }
334
335 /* A very simple resource-based lower bound on the initiation interval.
336 ??? Improve the accuracy of this bound by considering the
337 utilization of various units. */
338 static int
339 res_MII (ddg_ptr g)
340 {
341 return (g->num_nodes / issue_rate);
342 }
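/* E.g. a DDG of 12 insns at issue_rate 4 yields res_MII = 3; the
   division truncates, so 13 insns would also yield 3.  */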
343
344
345 /* Points to the array that contains the sched data for each node. */
346 static node_sched_params_ptr node_sched_params;
347
348 /* Allocate sched_params for each node and initialize it. Assumes that
349 the aux field of each node contains the asap bound (computed earlier),
350 and copies it into the sched_params field. */
351 static void
352 set_node_sched_params (ddg_ptr g)
353 {
354 int i;
355
356 /* Allocate for each node in the DDG a place to hold the "sched_data". */
357 /* Initialize ASAP/ALAP/HEIGHT to zero. */
358 node_sched_params = (node_sched_params_ptr)
359 xcalloc (g->num_nodes,
360 sizeof (struct node_sched_params));
361
362 /* Set the pointer of the general data of the node to point to the
363 appropriate sched_params structure. */
364 for (i = 0; i < g->num_nodes; i++)
365 {
366 /* Watch out for aliasing problems? */
367 node_sched_params[i].asap = g->nodes[i].aux.count;
368 g->nodes[i].aux.info = &node_sched_params[i];
369 }
370 }
371
372 static void
373 print_node_sched_params (FILE * file, int num_nodes)
374 {
375 int i;
376
377 if (! file)
378 return;
379 for (i = 0; i < num_nodes; i++)
380 {
381 node_sched_params_ptr nsp = &node_sched_params[i];
382 rtx reg_move = nsp->first_reg_move;
383 int j;
384
385 fprintf (file, "Node %d:\n", i);
386 fprintf (file, " asap = %d:\n", nsp->asap);
387 fprintf (file, " time = %d:\n", nsp->time);
388 fprintf (file, " nreg_moves = %d:\n", nsp->nreg_moves);
389 for (j = 0; j < nsp->nreg_moves; j++)
390 {
391 fprintf (file, " reg_move = ");
392 print_rtl_single (file, reg_move);
393 reg_move = PREV_INSN (reg_move);
394 }
395 }
396 }
397
398 /* Calculate an upper bound for II. SMS should not schedule the loop if it
399 requires more cycles than this bound. Currently set to the sum of the
400 longest latency edge for each node. Reset based on experiments. */
401 static int
402 calculate_maxii (ddg_ptr g)
403 {
404 int i;
405 int maxii = 0;
406
407 for (i = 0; i < g->num_nodes; i++)
408 {
409 ddg_node_ptr u = &g->nodes[i];
410 ddg_edge_ptr e;
411 int max_edge_latency = 0;
412
413 for (e = u->out; e; e = e->next_out)
414 max_edge_latency = MAX (max_edge_latency, e->latency);
415
416 maxii += max_edge_latency;
417 }
418 return maxii;
419 }
420
421 /*
422 Breaking intra-loop register anti-dependences:
423 Each intra-loop register anti-dependence implies a cross-iteration true
424 dependence of distance 1. Therefore, we can remove such false dependencies
425 and figure out if the partial schedule broke them by checking if (for a
426 true-dependence of distance 1): SCHED_TIME (def) < SCHED_TIME (use) and
427 if so generate a register move. The number of such moves is equal to:
428                 SCHED_TIME (use) - SCHED_TIME (def)       { 0  broken
429    nreg_moves = ----------------------------------- + 1 - {    dependence.
430                                  ii                       { 1  if not.
431 */
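/* A worked instance of the formula, with invented numbers: for ii = 4,
   SCHED_TIME (def) = 2 and SCHED_TIME (use) = 11 on a broken (distance-1)
   dependence, nreg_moves = (11 - 2) / 4 + 1 - 0 = 3; the def must survive
   across three ii boundaries, so three chained register copies are needed
   for each overlapped use to read the value of its own iteration.  */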
432 static struct undo_replace_buff_elem *
433 generate_reg_moves (partial_schedule_ptr ps)
434 {
435 ddg_ptr g = ps->g;
436 int ii = ps->ii;
437 int i;
438 struct undo_replace_buff_elem *reg_move_replaces = NULL;
439
440 for (i = 0; i < g->num_nodes; i++)
441 {
442 ddg_node_ptr u = &g->nodes[i];
443 ddg_edge_ptr e;
444 int nreg_moves = 0, i_reg_move;
445 sbitmap *uses_of_defs;
446 rtx last_reg_move;
447 rtx prev_reg, old_reg;
448
449 /* Compute the number of reg_moves needed for u, by looking at live
450 ranges started at u (excluding self-loops). */
451 for (e = u->out; e; e = e->next_out)
452 if (e->type == TRUE_DEP && e->dest != e->src)
453 {
454 int nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii;
455
456 if (e->distance == 1)
457 nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src) + ii) / ii;
458
459 /* If dest precedes src in the schedule of the kernel, then dest
460 will read before src writes and we can save one reg_copy. */
461 if (SCHED_ROW (e->dest) == SCHED_ROW (e->src)
462 && SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src))
463 nreg_moves4e--;
464
465 nreg_moves = MAX (nreg_moves, nreg_moves4e);
466 }
467
468 if (nreg_moves == 0)
469 continue;
470
471 /* Every use of the register defined by node may require a different
472 copy of this register, depending on the time the use is scheduled.
473 Set a bitmap vector, telling which nodes use each copy of this
474 register. */
475 uses_of_defs = sbitmap_vector_alloc (nreg_moves, g->num_nodes);
476 sbitmap_vector_zero (uses_of_defs, nreg_moves);
477 for (e = u->out; e; e = e->next_out)
478 if (e->type == TRUE_DEP && e->dest != e->src)
479 {
480 int dest_copy = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii;
481
482 if (e->distance == 1)
483 dest_copy = (SCHED_TIME (e->dest) - SCHED_TIME (e->src) + ii) / ii;
484
485 if (SCHED_ROW (e->dest) == SCHED_ROW (e->src)
486 && SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src))
487 dest_copy--;
488
489 if (dest_copy)
490 SET_BIT (uses_of_defs[dest_copy - 1], e->dest->cuid);
491 }
492
493 /* Now generate the reg_moves, attaching relevant uses to them. */
494 SCHED_NREG_MOVES (u) = nreg_moves;
495 old_reg = prev_reg = copy_rtx (SET_DEST (single_set (u->insn)));
496 last_reg_move = u->insn;
497
498 for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
499 {
500 unsigned int i_use = 0;
501 rtx new_reg = gen_reg_rtx (GET_MODE (prev_reg));
502 rtx reg_move = gen_move_insn (new_reg, prev_reg);
503 sbitmap_iterator sbi;
504
505 add_insn_before (reg_move, last_reg_move);
506 last_reg_move = reg_move;
507
508 if (!SCHED_FIRST_REG_MOVE (u))
509 SCHED_FIRST_REG_MOVE (u) = reg_move;
510
511 EXECUTE_IF_SET_IN_SBITMAP (uses_of_defs[i_reg_move], 0, i_use, sbi)
512 {
513 struct undo_replace_buff_elem *rep;
514
515 rep = (struct undo_replace_buff_elem *)
516 xcalloc (1, sizeof (struct undo_replace_buff_elem));
517 rep->insn = g->nodes[i_use].insn;
518 rep->orig_reg = old_reg;
519 rep->new_reg = new_reg;
520
521 if (! reg_move_replaces)
522 reg_move_replaces = rep;
523 else
524 {
525 rep->next = reg_move_replaces;
526 reg_move_replaces = rep;
527 }
528
529 replace_rtx (g->nodes[i_use].insn, old_reg, new_reg);
530 }
531
532 prev_reg = new_reg;
533 }
534 sbitmap_vector_free (uses_of_defs);
535 }
536 return reg_move_replaces;
537 }
538
539 /* We call this when we want to undo the SMS schedule for a given loop.
540 One of the things that we do is to delete the register moves generated
541 for the sake of SMS; this function deletes the register move instructions
542 recorded in the undo buffer. */
543 static void
544 undo_generate_reg_moves (partial_schedule_ptr ps,
545 struct undo_replace_buff_elem *reg_move_replaces)
546 {
547 int i,j;
548
549 for (i = 0; i < ps->g->num_nodes; i++)
550 {
551 ddg_node_ptr u = &ps->g->nodes[i];
552 rtx prev;
553 rtx crr = SCHED_FIRST_REG_MOVE (u);
554
555 for (j = 0; j < SCHED_NREG_MOVES (u); j++)
556 {
557 prev = PREV_INSN (crr);
558 delete_insn (crr);
559 crr = prev;
560 }
561 SCHED_FIRST_REG_MOVE (u) = NULL_RTX;
562 }
563
564 while (reg_move_replaces)
565 {
566 struct undo_replace_buff_elem *rep = reg_move_replaces;
567
568 reg_move_replaces = reg_move_replaces->next;
569 replace_rtx (rep->insn, rep->new_reg, rep->orig_reg);
570 }
571 }
572
573 /* Free memory allocated for the undo buffer. */
574 static void
575 free_undo_replace_buff (struct undo_replace_buff_elem *reg_move_replaces)
576 {
577
578 while (reg_move_replaces)
579 {
580 struct undo_replace_buff_elem *rep = reg_move_replaces;
581
582 reg_move_replaces = reg_move_replaces->next;
583 free (rep);
584 }
585 }
586
587 /* Bump the SCHED_TIMEs of all nodes to start from zero. Set the values
588 of SCHED_ROW and SCHED_STAGE. */
589 static void
590 normalize_sched_times (partial_schedule_ptr ps)
591 {
592 int i;
593 ddg_ptr g = ps->g;
594 int amount = PS_MIN_CYCLE (ps);
595 int ii = ps->ii;
596
597 /* Don't include the closing branch, assuming that it is the last node. */
598 for (i = 0; i < g->num_nodes - 1; i++)
599 {
600 ddg_node_ptr u = &g->nodes[i];
601 int normalized_time = SCHED_TIME (u) - amount;
602
603 gcc_assert (normalized_time >= 0);
604
605 SCHED_TIME (u) = normalized_time;
606 SCHED_ROW (u) = normalized_time % ii;
607 SCHED_STAGE (u) = normalized_time / ii;
608 }
609 }
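/* For example (illustrative values): with ii = 2 and SCHED_TIMEs {5, 6, 9},
   amount = PS_MIN_CYCLE (ps) = 5, so the normalized times become {0, 1, 4},
   giving SCHED_ROW (time % ii) = {0, 1, 0} and SCHED_STAGE (time / ii)
   = {0, 0, 2}.  */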
610
611 /* Set SCHED_COLUMN of each node according to its position in PS. */
612 static void
613 set_columns_for_ps (partial_schedule_ptr ps)
614 {
615 int row;
616
617 for (row = 0; row < ps->ii; row++)
618 {
619 ps_insn_ptr cur_insn = ps->rows[row];
620 int column = 0;
621
622 for (; cur_insn; cur_insn = cur_insn->next_in_row)
623 SCHED_COLUMN (cur_insn->node) = column++;
624 }
625 }
626
627 /* Permute the insns according to their order in PS, from row 0 to
628 row ii-1, and position them right before LAST. This schedules
629 the insns of the loop kernel. */
630 static void
631 permute_partial_schedule (partial_schedule_ptr ps, rtx last)
632 {
633 int ii = ps->ii;
634 int row;
635 ps_insn_ptr ps_ij;
636
637 for (row = 0; row < ii ; row++)
638 for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
639 if (PREV_INSN (last) != ps_ij->node->insn)
640 reorder_insns_nobb (ps_ij->node->first_note, ps_ij->node->insn,
641 PREV_INSN (last));
642 }
643
644 /* As part of undoing SMS we return to the original ordering of the
645 instructions inside the loop kernel. Given the partial schedule PS, this
646 function restores the ordering of the instructions according to their
647 CUIDs in the DDG (PS->G), which is the original order of the instructions
648 before performing SMS. */
649 static void
650 undo_permute_partial_schedule (partial_schedule_ptr ps, rtx last)
651 {
652 int i;
653
654 for (i = 0 ; i < ps->g->num_nodes; i++)
655 if (last == ps->g->nodes[i].insn
656 || last == ps->g->nodes[i].first_note)
657 break;
658 else if (PREV_INSN (last) != ps->g->nodes[i].insn)
659 reorder_insns_nobb (ps->g->nodes[i].first_note, ps->g->nodes[i].insn,
660 PREV_INSN (last));
661 }
662
663 /* Used to generate the prologue & epilogue. Duplicate the subset of
664 nodes whose stages are between FROM_STAGE and TO_STAGE (inclusive
665 of both), together with a prefix/suffix of their reg_moves. */
666 static void
667 duplicate_insns_of_cycles (partial_schedule_ptr ps, int from_stage,
668 int to_stage, int for_prolog)
669 {
670 int row;
671 ps_insn_ptr ps_ij;
672
673 for (row = 0; row < ps->ii; row++)
674 for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
675 {
676 ddg_node_ptr u_node = ps_ij->node;
677 int j, i_reg_moves;
678 rtx reg_move = NULL_RTX;
679
680 if (for_prolog)
681 {
682 /* SCHED_STAGE (u_node) >= from_stage == 0. Generate increasing
683 number of reg_moves starting with the second occurrence of
684 u_node, which is generated if its SCHED_STAGE <= to_stage. */
685 i_reg_moves = to_stage - SCHED_STAGE (u_node) + 1;
686 i_reg_moves = MAX (i_reg_moves, 0);
687 i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node));
688
689 /* The reg_moves start from the *first* reg_move backwards. */
690 if (i_reg_moves)
691 {
692 reg_move = SCHED_FIRST_REG_MOVE (u_node);
693 for (j = 1; j < i_reg_moves; j++)
694 reg_move = PREV_INSN (reg_move);
695 }
696 }
697 else /* It's for the epilog. */
698 {
699 /* SCHED_STAGE (u_node) <= to_stage. Generate all reg_moves,
700 starting to decrease one stage after u_node no longer occurs;
701 that is, generate all reg_moves until
702 SCHED_STAGE (u_node) == from_stage - 1. */
703 i_reg_moves = SCHED_NREG_MOVES (u_node)
704 - (from_stage - SCHED_STAGE (u_node) - 1);
705 i_reg_moves = MAX (i_reg_moves, 0);
706 i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node));
707
708 /* The reg_moves start from the *last* reg_move forwards. */
709 if (i_reg_moves)
710 {
711 reg_move = SCHED_FIRST_REG_MOVE (u_node);
712 for (j = 1; j < SCHED_NREG_MOVES (u_node); j++)
713 reg_move = PREV_INSN (reg_move);
714 }
715 }
716
717 for (j = 0; j < i_reg_moves; j++, reg_move = NEXT_INSN (reg_move))
718 emit_insn (copy_rtx (PATTERN (reg_move)));
719 if (SCHED_STAGE (u_node) >= from_stage
720 && SCHED_STAGE (u_node) <= to_stage)
721 duplicate_insn_chain (u_node->first_note, u_node->insn);
722 }
723 }
724
725
726 /* Generate the instructions (including reg_moves) for prolog & epilog. */
727 static void
728 generate_prolog_epilog (partial_schedule_ptr ps, struct loop * loop, rtx count_reg)
729 {
730 int i;
731 int last_stage = PS_STAGE_COUNT (ps) - 1;
732 edge e;
733
734 /* Generate the prolog, inserting its insns on the loop-entry edge. */
735 start_sequence ();
736
737 if (count_reg)
738 /* Generate a subtract instruction at the beginning of the prolog to
739 adjust the loop count by STAGE_COUNT. */
740 emit_insn (gen_sub2_insn (count_reg, GEN_INT (last_stage)));
741
742 for (i = 0; i < last_stage; i++)
743 duplicate_insns_of_cycles (ps, 0, i, 1);
744
745 /* Put the prolog on the one and only entry edge. */
746 e = loop_preheader_edge (loop);
747 loop_split_edge_with (e, get_insns ());
748
749 end_sequence ();
750
751 /* Generate the epilog, inserting its insns on the loop-exit edge. */
752 start_sequence ();
753
754 for (i = 0; i < last_stage; i++)
755 duplicate_insns_of_cycles (ps, i + 1, last_stage, 0);
756
757 /* Put the epilogue on the one and only exit edge. */
758 gcc_assert (loop->single_exit);
759 e = loop->single_exit;
760 loop_split_edge_with (e, get_insns ());
761 end_sequence ();
762 }
763
764 /* Return the line note insn preceding INSN, for debugging. Taken from
765 emit-rtl.c. */
766 static rtx
767 find_line_note (rtx insn)
768 {
769 for (; insn; insn = PREV_INSN (insn))
770 if (NOTE_P (insn)
771 && NOTE_LINE_NUMBER (insn) >= 0)
772 break;
773
774 return insn;
775 }
776
777 /* Return true if all the BBs of the loop are empty except the
778 loop header. */
779 static bool
780 loop_single_full_bb_p (struct loop *loop)
781 {
782 unsigned i;
783 basic_block *bbs = get_loop_body (loop);
784
785 for (i = 0; i < loop->num_nodes ; i++)
786 {
787 rtx head, tail;
788 bool empty_bb = true;
789
790 if (bbs[i] == loop->header)
791 continue;
792
793 /* Make sure that basic blocks other than the header
794 have only notes, labels or jumps. */
795 get_ebb_head_tail (bbs[i], bbs[i], &head, &tail);
796 for (; head != NEXT_INSN (tail); head = NEXT_INSN (head))
797 {
798 if (NOTE_P (head) || LABEL_P (head)
799 || (INSN_P (head) && JUMP_P (head)))
800 continue;
801 empty_bb = false;
802 break;
803 }
804
805 if (! empty_bb)
806 {
807 free (bbs);
808 return false;
809 }
810 }
811 free (bbs);
812 return true;
813 }
814
815 /* A simple loop from SMS point of view; it is a loop that is composed of
816 either a single basic block or two BBs - a header and a latch. */
817 #define SIMPLE_SMS_LOOP_P(loop) ((loop->num_nodes < 3) \
818 && (EDGE_COUNT (loop->latch->preds) == 1) \
819 && (EDGE_COUNT (loop->latch->succs) == 1))
820
821 /* Return true if the loop is in its canonical form and false if not.
822 i.e. it is SIMPLE_SMS_LOOP_P, has one preheader block, and a single exit. */
823 static bool
824 loop_canon_p (struct loop *loop)
825 {
826
827 if (loop->inner || ! loop->outer)
828 return false;
829
830 if (!loop->single_exit)
831 {
832 if (dump_file)
833 {
834 rtx line_note = find_line_note (BB_END (loop->header));
835
836 fprintf (dump_file, "SMS loop many exits ");
837 if (line_note)
838 {
839 expanded_location xloc;
840 NOTE_EXPANDED_LOCATION (xloc, line_note);
841 fprintf (dump_file, " %s %d (file, line)\n",
842 xloc.file, xloc.line);
843 }
844 }
845 return false;
846 }
847
848 if (! SIMPLE_SMS_LOOP_P (loop) && ! loop_single_full_bb_p (loop))
849 {
850 if (dump_file)
851 {
852 rtx line_note = find_line_note (BB_END (loop->header));
853
854 fprintf (dump_file, "SMS loop many BBs. ");
855 if (line_note)
856 {
857 expanded_location xloc;
858 NOTE_EXPANDED_LOCATION (xloc, line_note);
859 fprintf (dump_file, " %s %d (file, line)\n",
860 xloc.file, xloc.line);
861 }
862 }
863 return false;
864 }
865
866 return true;
867 }
868
869 /* If there is more than one entry to the loop,
870 make it one by splitting the first entry edge and
871 redirecting the others to the new BB. */
872 static void
873 canon_loop (struct loop *loop)
874 {
875 edge e;
876 edge_iterator i;
877
878 /* Avoid annoying special cases of edges going to exit
879 block. */
880 FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR->preds)
881 if ((e->flags & EDGE_FALLTHRU) && (EDGE_COUNT (e->src->succs) > 1))
882 loop_split_edge_with (e, NULL_RTX);
883
884 if (loop->latch == loop->header
885 || EDGE_COUNT (loop->latch->succs) > 1)
886 {
887 FOR_EACH_EDGE (e, i, loop->header->preds)
888 if (e->src == loop->latch)
889 break;
890 loop_split_edge_with (e, NULL_RTX);
891 }
892 }
893
894 /* Main entry point, perform SMS scheduling on the loops of the function
895 that consist of single basic blocks. */
896 static void
897 sms_schedule (void)
898 {
899 static int passes = 0;
900 rtx insn;
901 ddg_ptr *g_arr, g;
902 int * node_order;
903 int maxii;
904 unsigned i,num_loops;
905 partial_schedule_ptr ps;
906 struct df *df;
907 struct loops *loops;
908 basic_block bb = NULL;
909 /* Variables used only if loop versioning is needed. */
910 struct loop * nloop;
911 basic_block condition_bb = NULL;
912 edge latch_edge;
913 gcov_type trip_count = 0;
914
915 loops = loop_optimizer_init (LOOPS_HAVE_PREHEADERS
916 | LOOPS_HAVE_MARKED_SINGLE_EXITS);
917 if (!loops)
918 return; /* There are no loops to schedule. */
919
920 /* Initialize issue_rate. */
921 if (targetm.sched.issue_rate)
922 {
923 int temp = reload_completed;
924
925 reload_completed = 1;
926 issue_rate = targetm.sched.issue_rate ();
927 reload_completed = temp;
928 }
929 else
930 issue_rate = 1;
931
932 /* Initialize the scheduler. */
933 current_sched_info = &sms_sched_info;
934 sched_init ();
935
936 /* Init Data Flow analysis, to be used in interloop dep calculation. */
937 df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES | DF_SUBREGS);
938 df_rd_add_problem (df, 0);
939 df_ru_add_problem (df, 0);
940 df_chain_add_problem (df, DF_DU_CHAIN | DF_UD_CHAIN);
941 df_analyze (df);
942
943 /* Allocate memory to hold the DDG array, one entry for each loop.
944 We use loop->num as the index into this array. */
945 g_arr = XCNEWVEC (ddg_ptr, loops->num);
946
947
948 /* Build DDGs for all the relevant loops and hold them in G_ARR
949 indexed by the loop index. */
950 for (i = 0; i < loops->num; i++)
951 {
952 rtx head, tail;
953 rtx count_reg;
954 struct loop *loop = loops->parray[i];
955
956 /* For debugging. */
957 if ((passes++ > MAX_SMS_LOOP_NUMBER) && (MAX_SMS_LOOP_NUMBER != -1))
958 {
959 if (dump_file)
960 fprintf (dump_file, "SMS reached MAX_PASSES... \n");
961
962 break;
963 }
964
965 if (! loop_canon_p (loop))
966 continue;
967
968 if (! loop_single_full_bb_p (loop))
969 continue;
970
971 bb = loop->header;
972
973 get_ebb_head_tail (bb, bb, &head, &tail);
974 latch_edge = loop_latch_edge (loop);
975 gcc_assert (loop->single_exit);
976 if (loop->single_exit->count)
977 trip_count = latch_edge->count / loop->single_exit->count;
978
979 /* Perform SMS only on loops whose average count is above the threshold. */
980
981 if ( latch_edge->count
982 && (latch_edge->count < loop->single_exit->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD))
983 {
984 if (dump_file)
985 {
986 rtx line_note = find_line_note (tail);
987
988 if (line_note)
989 {
990 expanded_location xloc;
991 NOTE_EXPANDED_LOCATION (xloc, line_note);
992 fprintf (dump_file, "SMS bb %s %d (file, line)\n",
993 xloc.file, xloc.line);
994 }
995 fprintf (dump_file, "SMS single-bb-loop\n");
996 if (profile_info && flag_branch_probabilities)
997 {
998 fprintf (dump_file, "SMS loop-count ");
999 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1000 (HOST_WIDEST_INT) bb->count);
1001 fprintf (dump_file, "\n");
1002 fprintf (dump_file, "SMS trip-count ");
1003 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1004 (HOST_WIDEST_INT) trip_count);
1005 fprintf (dump_file, "\n");
1006 fprintf (dump_file, "SMS profile-sum-max ");
1007 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1008 (HOST_WIDEST_INT) profile_info->sum_max);
1009 fprintf (dump_file, "\n");
1010 }
1011 }
1012 continue;
1013 }
1014
1015 /* Make sure this is a doloop. */
1016 if ( !(count_reg = doloop_register_get (tail)))
1017 continue;
1018
1019 /* Don't handle BBs with calls or barriers, or !single_set insns. */
1020 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
1021 if (CALL_P (insn)
1022 || BARRIER_P (insn)
1023 || (INSN_P (insn) && !JUMP_P (insn)
1024 && !single_set (insn) && GET_CODE (PATTERN (insn)) != USE))
1025 break;
1026
1027 if (insn != NEXT_INSN (tail))
1028 {
1029 if (dump_file)
1030 {
1031 if (CALL_P (insn))
1032 fprintf (dump_file, "SMS loop-with-call\n");
1033 else if (BARRIER_P (insn))
1034 fprintf (dump_file, "SMS loop-with-barrier\n");
1035 else
1036 fprintf (dump_file, "SMS loop-with-not-single-set\n");
1037 print_rtl_single (dump_file, insn);
1038 }
1039
1040 continue;
1041 }
1042
1043 if (! (g = create_ddg (bb, df, 0)))
1044 {
1045 if (dump_file)
1046 fprintf (dump_file, "SMS doloop\n");
1047 continue;
1048 }
1049
1050 g_arr[i] = g;
1051 }
1052
1053 /* Release Data Flow analysis data structures. */
1054 df_finish (df);
1055 df = NULL;
1056
1057 /* We don't want to perform SMS on new loops - created by versioning. */
1058 num_loops = loops->num;
1059 /* Go over the built DDGs and perform SMS for each one of them. */
1060 for (i = 0; i < num_loops; i++)
1061 {
1062 rtx head, tail;
1063 rtx count_reg, count_init;
1064 int mii, rec_mii;
1065 unsigned stage_count = 0;
1066 HOST_WIDEST_INT loop_count = 0;
1067 struct loop *loop = loops->parray[i];
1068
1069 if (! (g = g_arr[i]))
1070 continue;
1071
1072 if (dump_file)
1073 print_ddg (dump_file, g);
1074
1075 get_ebb_head_tail (loop->header, loop->header, &head, &tail);
1076
1077 latch_edge = loop_latch_edge (loop);
1078 gcc_assert (loop->single_exit);
1079 if (loop->single_exit->count)
1080 trip_count = latch_edge->count / loop->single_exit->count;
1081
1082 if (dump_file)
1083 {
1084 rtx line_note = find_line_note (tail);
1085
1086 if (line_note)
1087 {
1088 expanded_location xloc;
1089 NOTE_EXPANDED_LOCATION (xloc, line_note);
1090 fprintf (dump_file, "SMS bb %s %d (file, line)\n",
1091 xloc.file, xloc.line);
1092 }
1093 fprintf (dump_file, "SMS single-bb-loop\n");
1094 if (profile_info && flag_branch_probabilities)
1095 {
1096 fprintf (dump_file, "SMS loop-count ");
1097 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1098 (HOST_WIDEST_INT) bb->count);
1099 fprintf (dump_file, "\n");
1100 fprintf (dump_file, "SMS profile-sum-max ");
1101 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1102 (HOST_WIDEST_INT) profile_info->sum_max);
1103 fprintf (dump_file, "\n");
1104 }
1105 fprintf (dump_file, "SMS doloop\n");
1106 fprintf (dump_file, "SMS built-ddg %d\n", g->num_nodes);
1107 fprintf (dump_file, "SMS num-loads %d\n", g->num_loads);
1108 fprintf (dump_file, "SMS num-stores %d\n", g->num_stores);
1109 }
1110
1111
1112 /* If the loop has a doloop register, it gets special
1113 handling. */
1114 count_init = NULL_RTX;
1115 if ((count_reg = doloop_register_get (tail)))
1116 {
1117 basic_block pre_header;
1118
1119 pre_header = loop_preheader_edge (loop)->src;
1120 count_init = const_iteration_count (count_reg, pre_header,
1121 &loop_count);
1122 }
1123 gcc_assert (count_reg);
1124
1125 if (dump_file && count_init)
1126 {
1127 fprintf (dump_file, "SMS const-doloop ");
1128 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1129 loop_count);
1130 fprintf (dump_file, "\n");
1131 }
1132
1133 node_order = XNEWVEC (int, g->num_nodes);
1134
1135 mii = 1; /* Need to pass some estimate of mii. */
1136 rec_mii = sms_order_nodes (g, mii, node_order);
1137 mii = MAX (res_MII (g), rec_mii);
1138 maxii = (calculate_maxii (g) * SMS_MAX_II_FACTOR) / 100;
1139
1140 if (dump_file)
1141 fprintf (dump_file, "SMS iis %d %d %d (rec_mii, mii, maxii)\n",
1142 rec_mii, mii, maxii);
1143
1144 /* After sms_order_nodes and before sms_schedule_by_order, to copy over
1145 ASAP. */
1146 set_node_sched_params (g);
1147
1148 ps = sms_schedule_by_order (g, mii, maxii, node_order);
1149
1150 if (ps)
1151 stage_count = PS_STAGE_COUNT (ps);
1152
1153 /* Stage count of 1 means that there is no interleaving between
1154 iterations; let the scheduling passes do the job. */
1155 if (stage_count < 1
1156 || (count_init && (loop_count <= stage_count))
1157 || (flag_branch_probabilities && (trip_count <= stage_count)))
1158 {
1159 if (dump_file)
1160 {
1161 fprintf (dump_file, "SMS failed... \n");
1162 fprintf (dump_file, "SMS sched-failed (stage-count=%d, loop-count=", stage_count);
1163 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, loop_count);
1164 fprintf (dump_file, ", trip-count=");
1165 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, trip_count);
1166 fprintf (dump_file, ")\n");
1167 }
1168 continue;
1169 }
1170 else
1171 {
1172 int orig_cycles = kernel_number_of_cycles (BB_HEAD (g->bb), BB_END (g->bb));
1173 int new_cycles;
1174 struct undo_replace_buff_elem *reg_move_replaces;
1175
1176 if (dump_file)
1177 {
1178 fprintf (dump_file,
1179 "SMS succeeded %d %d (with ii, sc)\n", ps->ii,
1180 stage_count);
1181 print_partial_schedule (ps, dump_file);
1182 fprintf (dump_file,
1183 "SMS Branch (%d) will later be scheduled at cycle %d.\n",
1184 g->closing_branch->cuid, PS_MIN_CYCLE (ps) - 1);
1185 }
1186
1187 /* Set the stage boundaries. If the DDG is built with closing_branch_deps,
1188 the closing_branch was scheduled and should appear in the last (ii-1)
1189 row. Otherwise, we are free to schedule the branch, and we let nodes
1190 that were scheduled at the first PS_MIN_CYCLE cycle appear in the first
1191 row; this should reduce stage_count to minimum. */
1192 normalize_sched_times (ps);
1193 rotate_partial_schedule (ps, PS_MIN_CYCLE (ps));
1194 set_columns_for_ps (ps);
1195
1196 /* Generate the kernel just to be able to measure its cycles. */
1197 permute_partial_schedule (ps, g->closing_branch->first_note);
1198 reg_move_replaces = generate_reg_moves (ps);
1199
1200 /* Get the number of cycles the new kernel expects to execute in. */
1201 new_cycles = kernel_number_of_cycles (BB_HEAD (g->bb), BB_END (g->bb));
1202
1203 /* Get back to the original loop so we can do loop versioning. */
1204 undo_permute_partial_schedule (ps, g->closing_branch->first_note);
1205 if (reg_move_replaces)
1206 undo_generate_reg_moves (ps, reg_move_replaces);
1207
1208 if (new_cycles >= orig_cycles)
1209 {
1210 /* SMS is not profitable so undo the permutation and reg move generation
1211 and return the kernel to its original state. */
1212 if (dump_file)
1213 fprintf (dump_file, "Undoing SMS because it is not profitable.\n");
1214
1215 }
1216 else
1217 {
1218 canon_loop (loop);
1219
1220 /* In case the BCT count is not known, do loop-versioning. */
1221 if (count_reg && ! count_init)
1222 {
1223 rtx comp_rtx = gen_rtx_fmt_ee (GT, VOIDmode, count_reg,
1224 GEN_INT(stage_count));
1225
1226 nloop = loop_version (loops, loop, comp_rtx, &condition_bb,
1227 true);
1228 }
1229
1230 /* Set new iteration count of loop kernel. */
1231 if (count_reg && count_init)
1232 SET_SRC (single_set (count_init)) = GEN_INT (loop_count
1233 - stage_count + 1);
1234
1235 /* Now apply the scheduled kernel to the RTL of the loop. */
1236 permute_partial_schedule (ps, g->closing_branch->first_note);
1237
1238 /* Mark this loop as software pipelined so the later
1239 scheduling passes don't touch it. */
1240 if (! flag_resched_modulo_sched)
1241 g->bb->flags |= BB_DISABLE_SCHEDULE;
1242 /* The life-info is not valid any more. */
1243 g->bb->flags |= BB_DIRTY;
1244
1245 reg_move_replaces = generate_reg_moves (ps);
1246 if (dump_file)
1247 print_node_sched_params (dump_file, g->num_nodes);
1248 /* Generate prolog and epilog. */
1249 if (count_reg && !count_init)
1250 generate_prolog_epilog (ps, loop, count_reg);
1251 else
1252 generate_prolog_epilog (ps, loop, NULL_RTX);
1253 }
1254 free_undo_replace_buff (reg_move_replaces);
1255 }
1256
1257 free_partial_schedule (ps);
1258 free (node_sched_params);
1259 free (node_order);
1260 free_ddg (g);
1261 }
1262
1263 free (g_arr);
1264
1265 /* Release scheduler data, needed until now because of DFA. */
1266 sched_finish ();
1267 loop_optimizer_finalize (loops);
1268 }
1269
1270 /* The SMS scheduling algorithm itself
1271 -----------------------------------
1272 Input: 'O' an ordered list of insns of a loop.
1273 Output: A scheduling of the loop - kernel, prolog, and epilogue.
1274
1275 'Q' is the empty Set
1276 'PS' is the partial schedule; it holds the currently scheduled nodes with
1277 their cycle/slot.
1278 'PSP' previously scheduled predecessors.
1279 'PSS' previously scheduled successors.
1280 't(u)' the cycle where u is scheduled.
1281 'l(u)' is the latency of u.
1282 'd(v,u)' is the dependence distance from v to u.
1283 'ASAP(u)' the earliest time at which u could be scheduled as computed in
1284 the node ordering phase.
1285 'check_hardware_resources_conflicts(u, PS, c)'
1286 run a trace around cycle/slot through DFA model
1287 to check resource conflicts involving instruction u
1288 at cycle c given the partial schedule PS.
1289 'add_to_partial_schedule_at_time(u, PS, c)'
1290 Add the node/instruction u to the partial schedule
1291 PS at time c.
1292 'calculate_register_pressure(PS)'
1293 Given a schedule of instructions, calculate the register
1294 pressure it implies. One implementation could be the
1295 maximum number of overlapping live ranges.
1296 'maxRP' The maximum allowed register pressure; it is usually derived from
1297 the number of registers available in the hardware.
1298
1299 1. II = MII.
1300 2. PS = empty list
1301 3. for each node u in O in pre-computed order
1302 4. if (PSP(u) != Q && PSS(u) == Q) then
1303 5. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over every v in PSP(u).
1304 6. start = Early_start; end = Early_start + II - 1; step = 1
1305 11. else if (PSP(u) == Q && PSS(u) != Q) then
1306 12. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over every v in PSS(u).
1307 13. start = Late_start; end = Late_start - II + 1; step = -1
1308 14. else if (PSP(u) != Q && PSS(u) != Q) then
1309 15. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over every v in PSP(u).
1310 16. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over every v in PSS(u).
1311 17. start = Early_start;
1312 18. end = min(Early_start + II - 1 , Late_start);
1313 19. step = 1
1314 20. else "if (PSP(u) == Q && PSS(u) == Q)"
1315 21. start = ASAP(u); end = start + II - 1; step = 1
1316 22. endif
1317
1318 23. success = false
1319 24. for (c = start ; c != end ; c += step)
1320 25. if check_hardware_resources_conflicts(u, PS, c) then
1321 26. add_to_partial_schedule_at_time(u, PS, c)
1322 27. success = true
1323 28. break
1324 29. endif
1325 30. endfor
1326 31. if (success == false) then
1327 32. II = II + 1
1328 33. if (II > maxII) then
1329 34. finish - failed to schedule
1330 35. endif
1331 36. goto 2.
1332 37. endif
1333 38. endfor
1334 39. if (calculate_register_pressure(PS) > maxRP) then
1335 40. goto 32.
1336 41. endif
1337 42. compute epilogue & prologue
1338 43. finish - succeeded to schedule
1339 */
1340
1341 /* A limit on the number of cycles that resource conflicts can span. ??? Should
1342 be provided by DFA, and be dependent on the type of insn scheduled. Currently
1343 set to 0 to save compile time. */
1344 #define DFA_HISTORY SMS_DFA_HISTORY
1345
1346 /* Given the partial schedule PS, this function calculates and returns the
1347 cycles in which we can schedule the node with the given index I.
1348 NOTE: Here we do the backtracking in SMS, in some special cases. We have
1349 noticed that there are several cases in which we fail to SMS the loop
1350 because the sched window of a node is empty due to tight data-deps. In
1351 such cases we want to unschedule some of the predecessors/successors
1352 until we get a non-empty scheduling window. It returns -1 if the
1353 scheduling window is empty and zero otherwise. */
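/* As a small example of the window computation (numbers invented): if u has
   a single scheduled predecessor v with SCHED_TIME (v) = 3, latency 2 and
   distance 0, and ii = 4, then early_start = 3 + 2 - 0 * 4 = 5 and the
   window tried is start = 5, end = 5 + ii = 9, step = 1, i.e. cycles
   5, 6, 7, 8.  */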
1354
1355 static int
1356 get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
1357 sbitmap sched_nodes, int ii, int *start_p, int *step_p, int *end_p)
1358 {
1359 int start, step, end;
1360 ddg_edge_ptr e;
1361 int u = nodes_order [i];
1362 ddg_node_ptr u_node = &ps->g->nodes[u];
1363 sbitmap psp = sbitmap_alloc (ps->g->num_nodes);
1364 sbitmap pss = sbitmap_alloc (ps->g->num_nodes);
1365 sbitmap u_node_preds = NODE_PREDECESSORS (u_node);
1366 sbitmap u_node_succs = NODE_SUCCESSORS (u_node);
1367 int psp_not_empty;
1368 int pss_not_empty;
1369
1370 /* 1. compute sched window for u (start, end, step). */
1371 sbitmap_zero (psp);
1372 sbitmap_zero (pss);
1373 psp_not_empty = sbitmap_a_and_b_cg (psp, u_node_preds, sched_nodes);
1374 pss_not_empty = sbitmap_a_and_b_cg (pss, u_node_succs, sched_nodes);
1375
1376 if (psp_not_empty && !pss_not_empty)
1377 {
1378 int early_start = INT_MIN;
1379
1380 end = INT_MAX;
1381 for (e = u_node->in; e != 0; e = e->next_in)
1382 {
1383 ddg_node_ptr v_node = e->src;
1384 if (TEST_BIT (sched_nodes, v_node->cuid))
1385 {
1386 int node_st = SCHED_TIME (v_node)
1387 + e->latency - (e->distance * ii);
1388
1389 early_start = MAX (early_start, node_st);
1390
1391 if (e->data_type == MEM_DEP)
1392 end = MIN (end, SCHED_TIME (v_node) + ii - 1);
1393 }
1394 }
1395 start = early_start;
1396 end = MIN (end, early_start + ii);
1397 step = 1;
1398 }
1399
1400 else if (!psp_not_empty && pss_not_empty)
1401 {
1402 int late_start = INT_MAX;
1403
1404 end = INT_MIN;
1405 for (e = u_node->out; e != 0; e = e->next_out)
1406 {
1407 ddg_node_ptr v_node = e->dest;
1408 if (TEST_BIT (sched_nodes, v_node->cuid))
1409 {
1410 late_start = MIN (late_start,
1411 SCHED_TIME (v_node) - e->latency
1412 + (e->distance * ii));
1413 if (e->data_type == MEM_DEP)
1414 end = MAX (end, SCHED_TIME (v_node) - ii + 1);
1415 }
1416 }
1417 start = late_start;
1418 end = MAX (end, late_start - ii);
1419 step = -1;
1420 }
1421
1422 else if (psp_not_empty && pss_not_empty)
1423 {
1424 int early_start = INT_MIN;
1425 int late_start = INT_MAX;
1426
1427 start = INT_MIN;
1428 end = INT_MAX;
1429 for (e = u_node->in; e != 0; e = e->next_in)
1430 {
1431 ddg_node_ptr v_node = e->src;
1432
1433 if (TEST_BIT (sched_nodes, v_node->cuid))
1434 {
1435 early_start = MAX (early_start,
1436 SCHED_TIME (v_node) + e->latency
1437 - (e->distance * ii));
1438 if (e->data_type == MEM_DEP)
1439 end = MIN (end, SCHED_TIME (v_node) + ii - 1);
1440 }
1441 }
1442 for (e = u_node->out; e != 0; e = e->next_out)
1443 {
1444 ddg_node_ptr v_node = e->dest;
1445
1446 if (TEST_BIT (sched_nodes, v_node->cuid))
1447 {
1448 late_start = MIN (late_start,
1449 SCHED_TIME (v_node) - e->latency
1450 + (e->distance * ii));
1451 if (e->data_type == MEM_DEP)
1452 start = MAX (start, SCHED_TIME (v_node) - ii + 1);
1453 }
1454 }
1455 start = MAX (start, early_start);
1456 end = MIN (end, MIN (early_start + ii, late_start + 1));
1457 step = 1;
1458 }
1459 else /* psp is empty && pss is empty. */
1460 {
1461 start = SCHED_ASAP (u_node);
1462 end = start + ii;
1463 step = 1;
1464 }
1465
1466 *start_p = start;
1467 *step_p = step;
1468 *end_p = end;
1469 sbitmap_free (psp);
1470 sbitmap_free (pss);
1471
1472 if ((start >= end && step == 1) || (start <= end && step == -1))
1473 return -1;
1474 else
1475 return 0;
1476 }
1477
1478 /* This function implements the SMS scheduling algorithm described
1479 above. */
1480 static partial_schedule_ptr
1481 sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
1482 {
1483 int ii = mii;
1484 int i, c, success;
1485 int try_again_with_larger_ii = true;
1486 int num_nodes = g->num_nodes;
1487 ddg_edge_ptr e;
1488 int start, end, step; /* Place together into one struct? */
1489 sbitmap sched_nodes = sbitmap_alloc (num_nodes);
1490 sbitmap must_precede = sbitmap_alloc (num_nodes);
1491 sbitmap must_follow = sbitmap_alloc (num_nodes);
1492 sbitmap tobe_scheduled = sbitmap_alloc (num_nodes);
1493
1494 partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY);
1495
1496 sbitmap_ones (tobe_scheduled);
1497 sbitmap_zero (sched_nodes);
1498
1499 while ((! sbitmap_equal (tobe_scheduled, sched_nodes)
1500 || try_again_with_larger_ii ) && ii < maxii)
1501 {
1502 int j;
1503 bool unscheduled_nodes = false;
1504
1505 if (dump_file)
1506 fprintf(dump_file, "Starting with ii=%d\n", ii);
1507 if (try_again_with_larger_ii)
1508 {
1509 try_again_with_larger_ii = false;
1510 sbitmap_zero (sched_nodes);
1511 }
1512
1513 for (i = 0; i < num_nodes; i++)
1514 {
1515 int u = nodes_order[i];
1516 ddg_node_ptr u_node = &ps->g->nodes[u];
1517 rtx insn = u_node->insn;
1518
1519 if (!INSN_P (insn))
1520 {
1521 RESET_BIT (tobe_scheduled, u);
1522 continue;
1523 }
1524
1525 if (JUMP_P (insn)) /* Closing branch handled later. */
1526 {
1527 RESET_BIT (tobe_scheduled, u);
1528 continue;
1529 }
1530
1531 if (TEST_BIT (sched_nodes, u))
1532 continue;
1533
1534 /* Try to get a non-empty scheduling window. */
1535 j = i;
1536 while (get_sched_window (ps, nodes_order, i, sched_nodes, ii, &start, &step, &end) < 0
1537 && j > 0)
1538 {
1539 unscheduled_nodes = true;
1540 if (TEST_BIT (NODE_PREDECESSORS (u_node), nodes_order[j - 1])
1541 || TEST_BIT (NODE_SUCCESSORS (u_node), nodes_order[j - 1]))
1542 {
1543 ps_unschedule_node (ps, &ps->g->nodes[nodes_order[j - 1]]);
1544 RESET_BIT (sched_nodes, nodes_order [j - 1]);
1545 }
1546 j--;
1547 }
1548 if (j < 0)
1549 {
1550 /* ??? Try backtracking instead of immediately ii++? */
1551 ii++;
1552 try_again_with_larger_ii = true;
1553 reset_partial_schedule (ps, ii);
1554 break;
1555 }
1556 /* 2. Try scheduling u in window. */
1557 if (dump_file)
1558 fprintf(dump_file, "Trying to schedule node %d in (%d .. %d) step %d\n",
1559 u, start, end, step);
1560
1561 /* Use must_follow & must_precede bitmaps to determine the order
1562 of nodes within the cycle. */
1563 sbitmap_zero (must_precede);
1564 sbitmap_zero (must_follow);
1565 for (e = u_node->in; e != 0; e = e->next_in)
1566 if (TEST_BIT (sched_nodes, e->src->cuid)
1567 && e->latency == (ii * e->distance)
1568 && start == SCHED_TIME (e->src))
1569 SET_BIT (must_precede, e->src->cuid);
1570
1571 for (e = u_node->out; e != 0; e = e->next_out)
1572 if (TEST_BIT (sched_nodes, e->dest->cuid)
1573 && e->latency == (ii * e->distance)
1574 && end == SCHED_TIME (e->dest))
1575 SET_BIT (must_follow, e->dest->cuid);
1576
1577 success = 0;
1578 if ((step > 0 && start < end) || (step < 0 && start > end))
1579 for (c = start; c != end; c += step)
1580 {
1581 ps_insn_ptr psi;
1582
1583 psi = ps_add_node_check_conflicts (ps, u_node, c,
1584 must_precede,
1585 must_follow);
1586
1587 if (psi)
1588 {
1589 SCHED_TIME (u_node) = c;
1590 SET_BIT (sched_nodes, u);
1591 success = 1;
1592 if (dump_file)
1593 fprintf(dump_file, "Schedule in %d\n", c);
1594 break;
1595 }
1596 }
1597 if (!success)
1598 {
1599 /* ??? Try backtracking instead of immediately ii++? */
1600 ii++;
1601 try_again_with_larger_ii = true;
1602 reset_partial_schedule (ps, ii);
1603 break;
1604 }
1605 if (unscheduled_nodes)
1606 break;
1607
1608 /* ??? If (success), check register pressure estimates. */
1609 } /* Continue with next node. */
1610 } /* While try_again_with_larger_ii. */
1611
1612 sbitmap_free (sched_nodes);
1613 sbitmap_free (must_precede);
1614 sbitmap_free (must_follow);
1615 sbitmap_free (tobe_scheduled);
1616
1617 if (ii >= maxii)
1618 {
1619 free_partial_schedule (ps);
1620 ps = NULL;
1621 }
1622 return ps;
1623 }
1624
1625 \f
1626 /* This page implements the algorithm for ordering the nodes of a DDG
1627 for modulo scheduling, activated through the
1628 "int sms_order_nodes (ddg_ptr, int mii, int * result)" API. */
1629
1630 #define ORDER_PARAMS(x) ((struct node_order_params *) (x)->aux.info)
1631 #define ASAP(x) (ORDER_PARAMS ((x))->asap)
1632 #define ALAP(x) (ORDER_PARAMS ((x))->alap)
1633 #define HEIGHT(x) (ORDER_PARAMS ((x))->height)
1634 #define MOB(x) (ALAP ((x)) - ASAP ((x)))
1635 #define DEPTH(x) (ASAP ((x)))
1636
1637 typedef struct node_order_params * nopa;
1638
1639 static void order_nodes_of_sccs (ddg_all_sccs_ptr, int * result);
1640 static int order_nodes_in_scc (ddg_ptr, sbitmap, sbitmap, int*, int);
1641 static nopa calculate_order_params (ddg_ptr, int mii);
1642 static int find_max_asap (ddg_ptr, sbitmap);
1643 static int find_max_hv_min_mob (ddg_ptr, sbitmap);
1644 static int find_max_dv_min_mob (ddg_ptr, sbitmap);
1645
1646 enum sms_direction {BOTTOMUP, TOPDOWN};
1647
1648 struct node_order_params
1649 {
1650 int asap;
1651 int alap;
1652 int height;
1653 };
1654
1655 /* Check if NODE_ORDER contains a permutation of 0 .. NUM_NODES-1. */
1656 static void
1657 check_nodes_order (int *node_order, int num_nodes)
1658 {
1659 int i;
1660 sbitmap tmp = sbitmap_alloc (num_nodes);
1661
1662 sbitmap_zero (tmp);
1663
1664 for (i = 0; i < num_nodes; i++)
1665 {
1666 int u = node_order[i];
1667
1668 gcc_assert (u < num_nodes && u >= 0 && !TEST_BIT (tmp, u));
1669
1670 SET_BIT (tmp, u);
1671 }
1672
1673 sbitmap_free (tmp);
1674 }
1675
1676 /* Order the nodes of G for scheduling and pass the result in
1677 NODE_ORDER. Also set aux.count of each node to ASAP.
1678 Return the recMII for the given DDG. */
1679 static int
1680 sms_order_nodes (ddg_ptr g, int mii, int * node_order)
1681 {
1682 int i;
1683 int rec_mii = 0;
1684 ddg_all_sccs_ptr sccs = create_ddg_all_sccs (g);
1685
1686 nopa nops = calculate_order_params (g, mii);
1687
1688 order_nodes_of_sccs (sccs, node_order);
1689
1690 if (sccs->num_sccs > 0)
1691 /* First SCC has the largest recurrence_length. */
1692 rec_mii = sccs->sccs[0]->recurrence_length;
1693
1694 /* Save ASAP before destroying node_order_params. */
1695 for (i = 0; i < g->num_nodes; i++)
1696 {
1697 ddg_node_ptr v = &g->nodes[i];
1698 v->aux.count = ASAP (v);
1699 }
1700
1701 free (nops);
1702 free_ddg_all_sccs (sccs);
1703 check_nodes_order (node_order, g->num_nodes);
1704
1705 return rec_mii;
1706 }
1707
1708 static void
1709 order_nodes_of_sccs (ddg_all_sccs_ptr all_sccs, int * node_order)
1710 {
1711 int i, pos = 0;
1712 ddg_ptr g = all_sccs->ddg;
1713 int num_nodes = g->num_nodes;
1714 sbitmap prev_sccs = sbitmap_alloc (num_nodes);
1715 sbitmap on_path = sbitmap_alloc (num_nodes);
1716 sbitmap tmp = sbitmap_alloc (num_nodes);
1717 sbitmap ones = sbitmap_alloc (num_nodes);
1718
1719 sbitmap_zero (prev_sccs);
1720 sbitmap_ones (ones);
1721
1722 /* Perform the node ordering starting from the SCC with the highest recMII.
1723 For each SCC order the nodes according to their ASAP/ALAP/HEIGHT etc. */
1724 for (i = 0; i < all_sccs->num_sccs; i++)
1725 {
1726 ddg_scc_ptr scc = all_sccs->sccs[i];
1727
1728 /* Add nodes on paths from previous SCCs to the current SCC. */
1729 find_nodes_on_paths (on_path, g, prev_sccs, scc->nodes);
1730 sbitmap_a_or_b (tmp, scc->nodes, on_path);
1731
1732 /* Add nodes on paths from the current SCC to previous SCCs. */
1733 find_nodes_on_paths (on_path, g, scc->nodes, prev_sccs);
1734 sbitmap_a_or_b (tmp, tmp, on_path);
1735
1736 /* Remove nodes of previous SCCs from current extended SCC. */
1737 sbitmap_difference (tmp, tmp, prev_sccs);
1738
1739 pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
1740 /* Above call to order_nodes_in_scc updated prev_sccs |= tmp. */
1741 }
1742
1743 /* Handle the remaining nodes that do not belong to any scc. Each call
1744 to order_nodes_in_scc handles a single connected component. */
1745 while (pos < g->num_nodes)
1746 {
1747 sbitmap_difference (tmp, ones, prev_sccs);
1748 pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
1749 }
1750 sbitmap_free (prev_sccs);
1751 sbitmap_free (on_path);
1752 sbitmap_free (tmp);
1753 sbitmap_free (ones);
1754 }
1755
1756 /* MII is needed if we consider backarcs (that do not close recursive cycles). */
1757 static struct node_order_params *
1758 calculate_order_params (ddg_ptr g, int mii ATTRIBUTE_UNUSED)
1759 {
1760 int u;
1761 int max_asap;
1762 int num_nodes = g->num_nodes;
1763 ddg_edge_ptr e;
1764 /* Allocate a place to hold ordering params for each node in the DDG. */
1765 nopa node_order_params_arr;
1766
1767 /* Initialize ASAP/ALAP/HEIGHT to zero. */
1768 node_order_params_arr = (nopa) xcalloc (num_nodes,
1769 sizeof (struct node_order_params));
1770
1771 /* Set the aux pointer of each node to point to its order_params structure. */
1772 for (u = 0; u < num_nodes; u++)
1773 g->nodes[u].aux.info = &node_order_params_arr[u];
1774
1775 /* Disregarding a backarc from each recursive cycle to obtain a DAG,
1776 calculate ASAP, ALAP, mobility, distance, and height for each node
1777 in the dependence (directed acyclic) graph. */
1778
1779 /* We assume that the nodes in the array are in topological order. */
1780
1781 max_asap = 0;
1782 for (u = 0; u < num_nodes; u++)
1783 {
1784 ddg_node_ptr u_node = &g->nodes[u];
1785
1786 ASAP (u_node) = 0;
1787 for (e = u_node->in; e; e = e->next_in)
1788 if (e->distance == 0)
1789 ASAP (u_node) = MAX (ASAP (u_node),
1790 ASAP (e->src) + e->latency);
1791 max_asap = MAX (max_asap, ASAP (u_node));
1792 }
1793
1794 for (u = num_nodes - 1; u > -1; u--)
1795 {
1796 ddg_node_ptr u_node = &g->nodes[u];
1797
1798 ALAP (u_node) = max_asap;
1799 HEIGHT (u_node) = 0;
1800 for (e = u_node->out; e; e = e->next_out)
1801 if (e->distance == 0)
1802 {
1803 ALAP (u_node) = MIN (ALAP (u_node),
1804 ALAP (e->dest) - e->latency);
1805 HEIGHT (u_node) = MAX (HEIGHT (u_node),
1806 HEIGHT (e->dest) + e->latency);
1807 }
1808 }
1809
1810 return node_order_params_arr;
1811 }
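/* A tiny worked example of the parameters computed above (illustrative
   only; the nodes and latencies are made up).  For the DAG

       a --(2)--> b --(1)--> d
       a --(1)--> c --(1)--> d

   the forward pass gives ASAP(a)=0, ASAP(b)=2, ASAP(c)=1, ASAP(d)=3,
   so max_asap=3; the backward pass gives ALAP(d)=3, ALAP(b)=2,
   ALAP(c)=2, ALAP(a)=0 and HEIGHT(d)=0, HEIGHT(b)=1, HEIGHT(c)=1,
   HEIGHT(a)=3.  Mobility, the slack ALAP(u) - ASAP(u), is therefore 0
   for the critical-path nodes a, b and d, and 1 for c, which may slide
   by one cycle.  */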
1812
1813 static int
1814 find_max_asap (ddg_ptr g, sbitmap nodes)
1815 {
1816 unsigned int u = 0;
1817 int max_asap = -1;
1818 int result = -1;
1819 sbitmap_iterator sbi;
1820
1821 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
1822 {
1823 ddg_node_ptr u_node = &g->nodes[u];
1824
1825 if (max_asap < ASAP (u_node))
1826 {
1827 max_asap = ASAP (u_node);
1828 result = u;
1829 }
1830 }
1831 return result;
1832 }
1833
1834 static int
1835 find_max_hv_min_mob (ddg_ptr g, sbitmap nodes)
1836 {
1837 unsigned int u = 0;
1838 int max_hv = -1;
1839 int min_mob = INT_MAX;
1840 int result = -1;
1841 sbitmap_iterator sbi;
1842
1843 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
1844 {
1845 ddg_node_ptr u_node = &g->nodes[u];
1846
1847 if (max_hv < HEIGHT (u_node))
1848 {
1849 max_hv = HEIGHT (u_node);
1850 min_mob = MOB (u_node);
1851 result = u;
1852 }
1853 else if ((max_hv == HEIGHT (u_node))
1854 && (min_mob > MOB (u_node)))
1855 {
1856 min_mob = MOB (u_node);
1857 result = u;
1858 }
1859 }
1860 return result;
1861 }
1862
1863 static int
1864 find_max_dv_min_mob (ddg_ptr g, sbitmap nodes)
1865 {
1866 unsigned int u = 0;
1867 int max_dv = -1;
1868 int min_mob = INT_MAX;
1869 int result = -1;
1870 sbitmap_iterator sbi;
1871
1872 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
1873 {
1874 ddg_node_ptr u_node = &g->nodes[u];
1875
1876 if (max_dv < DEPTH (u_node))
1877 {
1878 max_dv = DEPTH (u_node);
1879 min_mob = MOB (u_node);
1880 result = u;
1881 }
1882 else if ((max_dv == DEPTH (u_node))
1883 && (min_mob > MOB (u_node)))
1884 {
1885 min_mob = MOB (u_node);
1886 result = u;
1887 }
1888 }
1889 return result;
1890 }
1891
1892 /* Places the nodes of SCC into the NODE_ORDER array starting
1893 at position POS, according to the SMS ordering algorithm.
1894 NODES_ORDERED (in&out parameter) holds the bitset of all nodes in
1895 the NODE_ORDER array, starting from position zero. */
1896 static int
1897 order_nodes_in_scc (ddg_ptr g, sbitmap nodes_ordered, sbitmap scc,
1898 int * node_order, int pos)
1899 {
1900 enum sms_direction dir;
1901 int num_nodes = g->num_nodes;
1902 sbitmap workset = sbitmap_alloc (num_nodes);
1903 sbitmap tmp = sbitmap_alloc (num_nodes);
1904 sbitmap zero_bitmap = sbitmap_alloc (num_nodes);
1905 sbitmap predecessors = sbitmap_alloc (num_nodes);
1906 sbitmap successors = sbitmap_alloc (num_nodes);
1907
1908 sbitmap_zero (predecessors);
1909 find_predecessors (predecessors, g, nodes_ordered);
1910
1911 sbitmap_zero (successors);
1912 find_successors (successors, g, nodes_ordered);
1913
1914 sbitmap_zero (tmp);
1915 if (sbitmap_a_and_b_cg (tmp, predecessors, scc))
1916 {
1917 sbitmap_copy (workset, tmp);
1918 dir = BOTTOMUP;
1919 }
1920 else if (sbitmap_a_and_b_cg (tmp, successors, scc))
1921 {
1922 sbitmap_copy (workset, tmp);
1923 dir = TOPDOWN;
1924 }
1925 else
1926 {
1927 int u;
1928
1929 sbitmap_zero (workset);
1930 if ((u = find_max_asap (g, scc)) >= 0)
1931 SET_BIT (workset, u);
1932 dir = BOTTOMUP;
1933 }
1934
1935 sbitmap_zero (zero_bitmap);
1936 while (!sbitmap_equal (workset, zero_bitmap))
1937 {
1938 int v;
1939 ddg_node_ptr v_node;
1940 sbitmap v_node_preds;
1941 sbitmap v_node_succs;
1942
1943 if (dir == TOPDOWN)
1944 {
1945 while (!sbitmap_equal (workset, zero_bitmap))
1946 {
1947 v = find_max_hv_min_mob (g, workset);
1948 v_node = &g->nodes[v];
1949 node_order[pos++] = v;
1950 v_node_succs = NODE_SUCCESSORS (v_node);
1951 sbitmap_a_and_b (tmp, v_node_succs, scc);
1952
1953 /* Don't consider the already ordered successors again. */
1954 sbitmap_difference (tmp, tmp, nodes_ordered);
1955 sbitmap_a_or_b (workset, workset, tmp);
1956 RESET_BIT (workset, v);
1957 SET_BIT (nodes_ordered, v);
1958 }
1959 dir = BOTTOMUP;
1960 sbitmap_zero (predecessors);
1961 find_predecessors (predecessors, g, nodes_ordered);
1962 sbitmap_a_and_b (workset, predecessors, scc);
1963 }
1964 else
1965 {
1966 while (!sbitmap_equal (workset, zero_bitmap))
1967 {
1968 v = find_max_dv_min_mob (g, workset);
1969 v_node = &g->nodes[v];
1970 node_order[pos++] = v;
1971 v_node_preds = NODE_PREDECESSORS (v_node);
1972 sbitmap_a_and_b (tmp, v_node_preds, scc);
1973
1974 /* Don't consider the already ordered predecessors again. */
1975 sbitmap_difference (tmp, tmp, nodes_ordered);
1976 sbitmap_a_or_b (workset, workset, tmp);
1977 RESET_BIT (workset, v);
1978 SET_BIT (nodes_ordered, v);
1979 }
1980 dir = TOPDOWN;
1981 sbitmap_zero (successors);
1982 find_successors (successors, g, nodes_ordered);
1983 sbitmap_a_and_b (workset, successors, scc);
1984 }
1985 }
1986 sbitmap_free (tmp);
1987 sbitmap_free (workset);
1988 sbitmap_free (zero_bitmap);
1989 sbitmap_free (predecessors);
1990 sbitmap_free (successors);
1991 return pos;
1992 }
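/* A sketch of how the direction alternates above (illustrative).
   Suppose some node x of the SCC is a predecessor of an already-ordered
   node; then the intersection of PREDECESSORS with the SCC is nonempty,
   so we start BOTTOMUP from it, repeatedly picking the node of maximal
   depth (breaking ties by minimal mobility), appending it to NODE_ORDER
   and pulling its in-SCC predecessors into the workset.  When the
   workset drains, we flip to TOPDOWN over the successors of everything
   ordered so far, now choosing by maximal height and minimal mobility,
   and keep alternating until the whole extended SCC is placed.  */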
1993
1994 \f
1995 /* This page contains functions for manipulating partial-schedules during
1996 modulo scheduling. */
1997
1998 /* Create a partial schedule and allocate memory to hold II rows. */
1999
2000 static partial_schedule_ptr
2001 create_partial_schedule (int ii, ddg_ptr g, int history)
2002 {
2003 partial_schedule_ptr ps = XNEW (struct partial_schedule);
2004 ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr));
2005 ps->ii = ii;
2006 ps->history = history;
2007 ps->min_cycle = INT_MAX;
2008 ps->max_cycle = INT_MIN;
2009 ps->g = g;
2010
2011 return ps;
2012 }
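/* Typical lifecycle of a partial schedule, as a minimal sketch (the
   initial II and the retry policy here are assumptions, not the exact
   logic of the scheduling driver):

     partial_schedule_ptr ps = create_partial_schedule (mii, g, 0);
     ...try to schedule all nodes within MII cycles...
     reset_partial_schedule (ps, mii + 1);   (retry with a larger II)
     ...
     free_partial_schedule (ps);

   Row SMODULO (cycle, ps->ii) of PS->ROWS holds all insns scheduled at
   cycles congruent to that row modulo II.  */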
2013
2014 /* Free the PS_INSNs in the rows array of the given partial schedule.
2015 ??? Consider caching the PS_INSNs. */
2016 static void
2017 free_ps_insns (partial_schedule_ptr ps)
2018 {
2019 int i;
2020
2021 for (i = 0; i < ps->ii; i++)
2022 {
2023 while (ps->rows[i])
2024 {
2025 ps_insn_ptr ps_insn = ps->rows[i]->next_in_row;
2026
2027 free (ps->rows[i]);
2028 ps->rows[i] = ps_insn;
2029 }
2030 ps->rows[i] = NULL;
2031 }
2032 }
2033
2034 /* Free all the memory allocated to the partial schedule. */
2035
2036 static void
2037 free_partial_schedule (partial_schedule_ptr ps)
2038 {
2039 if (!ps)
2040 return;
2041 free_ps_insns (ps);
2042 free (ps->rows);
2043 free (ps);
2044 }
2045
2046 /* Clear the rows array with its PS_INSNs, and resize it to hold
2047 NEW_II rows if NEW_II differs from the current II. */
2048
2049 static void
2050 reset_partial_schedule (partial_schedule_ptr ps, int new_ii)
2051 {
2052 if (!ps)
2053 return;
2054 free_ps_insns (ps);
2055 if (new_ii == ps->ii)
2056 return;
2057 ps->rows = (ps_insn_ptr *) xrealloc (ps->rows, new_ii
2058 * sizeof (ps_insn_ptr));
2059 memset (ps->rows, 0, new_ii * sizeof (ps_insn_ptr));
2060 ps->ii = new_ii;
2061 ps->min_cycle = INT_MAX;
2062 ps->max_cycle = INT_MIN;
2063 }
2064
2065 /* Prints the partial schedule as an II-row array; for each row,
2066 print the UIDs of the insns in it. */
2067 void
2068 print_partial_schedule (partial_schedule_ptr ps, FILE *dump)
2069 {
2070 int i;
2071
2072 for (i = 0; i < ps->ii; i++)
2073 {
2074 ps_insn_ptr ps_i = ps->rows[i];
2075
2076 fprintf (dump, "\n[CYCLE %d ]: ", i);
2077 while (ps_i)
2078 {
2079 fprintf (dump, "%d, ",
2080 INSN_UID (ps_i->node->insn));
2081 ps_i = ps_i->next_in_row;
2082 }
2083 }
2084 }
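/* For example, with II = 2 the dump produced above might look like
   this (the insn UIDs are illustrative):

     [CYCLE 0 ]: 12, 17,
     [CYCLE 1 ]: 14,  */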
2085
2086 /* Creates a PS_INSN object and initializes it with the given parameters. */
2087 static ps_insn_ptr
2088 create_ps_insn (ddg_node_ptr node, int rest_count, int cycle)
2089 {
2090 ps_insn_ptr ps_i = XNEW (struct ps_insn);
2091
2092 ps_i->node = node;
2093 ps_i->next_in_row = NULL;
2094 ps_i->prev_in_row = NULL;
2095 ps_i->row_rest_count = rest_count;
2096 ps_i->cycle = cycle;
2097
2098 return ps_i;
2099 }
2100
2101
2102 /* Removes the given PS_INSN from the partial schedule. Returns true if
2103 the node was found and removed, false otherwise. */
2104 static bool
2105 remove_node_from_ps (partial_schedule_ptr ps, ps_insn_ptr ps_i)
2106 {
2107 int row;
2108
2109 if (!ps || !ps_i)
2110 return false;
2111
2112 row = SMODULO (ps_i->cycle, ps->ii);
2113 if (! ps_i->prev_in_row)
2114 {
2115 if (ps_i != ps->rows[row])
2116 return false;
2117
2118 ps->rows[row] = ps_i->next_in_row;
2119 if (ps->rows[row])
2120 ps->rows[row]->prev_in_row = NULL;
2121 }
2122 else
2123 {
2124 ps_i->prev_in_row->next_in_row = ps_i->next_in_row;
2125 if (ps_i->next_in_row)
2126 ps_i->next_in_row->prev_in_row = ps_i->prev_in_row;
2127 }
2128 free (ps_i);
2129 return true;
2130 }
2131
2132 /* Unlike what the literature describes for modulo scheduling (which focuses
2133 on VLIW machines), the order of the instructions inside a cycle is
2134 important. Given the bitmaps MUST_FOLLOW and MUST_PRECEDE we know
2135 where the current instruction should go relative to the already
2136 scheduled instructions in the given cycle. Go over these
2137 instructions and find the first possible column to put it in. */
2138 static bool
2139 ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
2140 sbitmap must_precede, sbitmap must_follow)
2141 {
2142 ps_insn_ptr next_ps_i;
2143 ps_insn_ptr first_must_follow = NULL;
2144 ps_insn_ptr last_must_precede = NULL;
2145 int row;
2146
2147 if (! ps_i)
2148 return false;
2149
2150 row = SMODULO (ps_i->cycle, ps->ii);
2151
2152 /* Find the first must-follow and the last must-precede insns, and
2153 insert the node immediately after the last must-precede, making
2154 sure that no must-follow insn comes before that position. */
2155 for (next_ps_i = ps->rows[row];
2156 next_ps_i;
2157 next_ps_i = next_ps_i->next_in_row)
2158 {
2159 if (TEST_BIT (must_follow, next_ps_i->node->cuid)
2160 && ! first_must_follow)
2161 first_must_follow = next_ps_i;
2162 if (TEST_BIT (must_precede, next_ps_i->node->cuid))
2163 {
2164 /* If we have already met a node that must follow, then
2165 there is no possible column. */
2166 if (first_must_follow)
2167 return false;
2168 else
2169 last_must_precede = next_ps_i;
2170 }
2171 }
2172
2173 /* Now insert the node after LAST_MUST_PRECEDE, or at the head of the row if there is none. */
2174
2175 if (! last_must_precede)
2176 {
2177 ps_i->next_in_row = ps->rows[row];
2178 ps_i->prev_in_row = NULL;
2179 if (ps_i->next_in_row)
2180 ps_i->next_in_row->prev_in_row = ps_i;
2181 ps->rows[row] = ps_i;
2182 }
2183 else
2184 {
2185 ps_i->next_in_row = last_must_precede->next_in_row;
2186 last_must_precede->next_in_row = ps_i;
2187 ps_i->prev_in_row = last_must_precede;
2188 if (ps_i->next_in_row)
2189 ps_i->next_in_row->prev_in_row = ps_i;
2190 }
2191
2192 return true;
2193 }
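/* Column-placement example (illustrative).  Suppose row R already
   holds A -> B -> C, and for the incoming insn X we have
   MUST_PRECEDE = {A, B} and MUST_FOLLOW = {C}.  The scan above records
   B as the last must-precede and C as the first must-follow, so X is
   linked in right after B, giving A -> B -> X -> C.  Had C appeared
   before B in the row, a must-follow would precede a must-precede and
   the function would return false.  */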
2194
2195 /* Advances the PS_INSN one column in its current row; returns false
2196 on failure and true on success. Bit N is set in MUST_FOLLOW if
2197 the node with cuid N must come after the node pointed to by
2198 PS_I when scheduled in the same cycle. */
2199 static int
2200 ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
2201 sbitmap must_follow)
2202 {
2203 ps_insn_ptr prev, next;
2204 int row;
2205 ddg_node_ptr next_node;
2206
2207 if (!ps || !ps_i)
2208 return false;
2209
2210 row = SMODULO (ps_i->cycle, ps->ii);
2211
2212 if (! ps_i->next_in_row)
2213 return false;
2214
2215 next_node = ps_i->next_in_row->node;
2216
2217 /* Check if next_in_row is dependent on ps_i, both having the same
2218 sched times (typically ANTI_DEP). If so, ps_i cannot skip over it. */
2219 if (TEST_BIT (must_follow, next_node->cuid))
2220 return false;
2221
2222 /* Advance PS_I over its next_in_row in the doubly linked list. */
2223 prev = ps_i->prev_in_row;
2224 next = ps_i->next_in_row;
2225
2226 if (ps_i == ps->rows[row])
2227 ps->rows[row] = next;
2228
2229 ps_i->next_in_row = next->next_in_row;
2230
2231 if (next->next_in_row)
2232 next->next_in_row->prev_in_row = ps_i;
2233
2234 next->next_in_row = ps_i;
2235 ps_i->prev_in_row = next;
2236
2237 next->prev_in_row = prev;
2238 if (prev)
2239 prev->next_in_row = next;
2240
2241 return true;
2242 }
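/* For instance (illustrative), in a row A -> X -> B, advancing X swaps
   it with its successor to give A -> B -> X, provided B's cuid is not
   set in MUST_FOLLOW.  This is what lets ps_add_node_check_conflicts
   retry the same cycle with a different intra-row issue order.  */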
2243
2244 /* Inserts a DDG_NODE to the given partial schedule at the given cycle.
2245 Returns NULL if this is not possible and a PS_INSN otherwise. Bit N is
2246 set in MUST_PRECEDE/MUST_FOLLOW if the node with cuid N must come
2247 before/after (respectively) the node pointed to by PS_I when scheduled
2248 in the same cycle. */
2249 static ps_insn_ptr
2250 add_node_to_ps (partial_schedule_ptr ps, ddg_node_ptr node, int cycle,
2251 sbitmap must_precede, sbitmap must_follow)
2252 {
2253 ps_insn_ptr ps_i;
2254 int rest_count = 1;
2255 int row = SMODULO (cycle, ps->ii);
2256
2257 if (ps->rows[row]
2258 && ps->rows[row]->row_rest_count >= issue_rate)
2259 return NULL;
2260
2261 if (ps->rows[row])
2262 rest_count += ps->rows[row]->row_rest_count;
2263
2264 ps_i = create_ps_insn (node, rest_count, cycle);
2265
2266 /* Finds and inserts PS_I according to MUST_FOLLOW and
2267 MUST_PRECEDE. */
2268 if (! ps_insn_find_column (ps, ps_i, must_precede, must_follow))
2269 {
2270 free (ps_i);
2271 return NULL;
2272 }
2273
2274 return ps_i;
2275 }
2276
2277 /* Advance time one cycle. Assumes DFA is being used. */
2278 static void
2279 advance_one_cycle (void)
2280 {
2281 if (targetm.sched.dfa_pre_cycle_insn)
2282 state_transition (curr_state,
2283 targetm.sched.dfa_pre_cycle_insn ());
2284
2285 state_transition (curr_state, NULL);
2286
2287 if (targetm.sched.dfa_post_cycle_insn)
2288 state_transition (curr_state,
2289 targetm.sched.dfa_post_cycle_insn ());
2290 }
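/* The DFA protocol used on this page, in outline (a sketch of how the
   calls compose, not additional functionality):

     state_reset (curr_state);
     for each cycle
       {
         for each insn issued in this cycle
           if (state_transition (curr_state, insn) >= 0)
             ...the insn cannot issue in the current state...
         advance_one_cycle ();
       }

   A negative return value from state_transition means the insn was
   issued successfully into the current DFA state.  */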
2291
2292 /* Given the kernel of a loop (from FIRST_INSN to LAST_INSN), find
2293 the number of cycles the kernel takes according to the DFA. We
2294 use this to check whether SMS paid off once register moves are
2295 added: in some cases their overhead makes the result worse than
2296 the original loop, so SMS should be kept only when it yields
2297 fewer cycles after the register moves are accounted for. */
2298 static int
2299 kernel_number_of_cycles (rtx first_insn, rtx last_insn)
2300 {
2301 int cycles = 0;
2302 rtx insn;
2303 int can_issue_more = issue_rate;
2304
2305 state_reset (curr_state);
2306
2307 for (insn = first_insn;
2308 insn != NULL_RTX && insn != last_insn;
2309 insn = NEXT_INSN (insn))
2310 {
2311 if (! INSN_P (insn) || GET_CODE (PATTERN (insn)) == USE)
2312 continue;
2313
2314 /* Check if there is room for the current insn. */
2315 if (!can_issue_more || state_dead_lock_p (curr_state))
2316 {
2317 cycles++;
2318 advance_one_cycle ();
2319 can_issue_more = issue_rate;
2320 }
2321
2322 /* Update the DFA state; if the insn cannot be issued in the
2323 current cycle, advance to the next one. */
2324 if (state_transition (curr_state, insn) >= 0)
2325 {
2326 cycles++;
2327 advance_one_cycle ();
2328 can_issue_more = issue_rate;
2329 }
2330
2331 if (targetm.sched.variable_issue)
2332 can_issue_more =
2333 targetm.sched.variable_issue (sched_dump, sched_verbose,
2334 insn, can_issue_more);
2335 /* A naked CLOBBER or USE generates no instruction, so don't
2336 let them consume issue slots. */
2337 else if (GET_CODE (PATTERN (insn)) != USE
2338 && GET_CODE (PATTERN (insn)) != CLOBBER)
2339 can_issue_more--;
2340 }
2341 return cycles;
2342 }
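/* Example of the intended profitability check (a sketch; the actual
   comparison is done by the caller): if the original loop body fits in
   5 cycles but the SMS kernel plus its register moves needs 6, the
   modulo schedule should be rejected in favor of the original loop.  */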
2343
2344 /* Checks if PS has resource conflicts according to DFA, starting from
2345 FROM cycle to TO cycle; returns true if there are conflicts and false
2346 if there are no conflicts. Assumes DFA is being used. */
2347 static int
2348 ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
2349 {
2350 int cycle;
2351
2352 state_reset (curr_state);
2353
2354 for (cycle = from; cycle <= to; cycle++)
2355 {
2356 ps_insn_ptr crr_insn;
2357 /* Holds the remaining issue slots in the current row. */
2358 int can_issue_more = issue_rate;
2359
2360 /* Walk through the DFA for the current row. */
2361 for (crr_insn = ps->rows[SMODULO (cycle, ps->ii)];
2362 crr_insn;
2363 crr_insn = crr_insn->next_in_row)
2364 {
2365 rtx insn = crr_insn->node->insn;
2366
2367 if (!INSN_P (insn))
2368 continue;
2369
2370 /* Check if there is room for the current insn. */
2371 if (!can_issue_more || state_dead_lock_p (curr_state))
2372 return true;
2373
2374 /* Update the DFA state and return with failure if the DFA found
2375 resource conflicts. */
2376 if (state_transition (curr_state, insn) >= 0)
2377 return true;
2378
2379 if (targetm.sched.variable_issue)
2380 can_issue_more =
2381 targetm.sched.variable_issue (sched_dump, sched_verbose,
2382 insn, can_issue_more);
2383 /* A naked CLOBBER or USE generates no instruction, so don't
2384 let them consume issue slots. */
2385 else if (GET_CODE (PATTERN (insn)) != USE
2386 && GET_CODE (PATTERN (insn)) != CLOBBER)
2387 can_issue_more--;
2388 }
2389
2390 /* Advance the DFA to the next cycle. */
2391 advance_one_cycle ();
2392 }
2393 return false;
2394 }
2395
2396 /* Checks if the given node causes resource conflicts when added to PS at
2397 cycle C. If not, the node is added to PS and its PS_INSN is returned;
2398 otherwise NULL is returned. Bit N is set in MUST_PRECEDE/MUST_FOLLOW
2399 if the node with cuid N must come before/after (respectively) the
2400 newly added node when scheduled in the same cycle. */
2401 ps_insn_ptr
2402 ps_add_node_check_conflicts (partial_schedule_ptr ps, ddg_node_ptr n,
2403 int c, sbitmap must_precede,
2404 sbitmap must_follow)
2405 {
2406 int has_conflicts = 0;
2407 ps_insn_ptr ps_i;
2408
2409 /* First add the node to the PS; if this succeeds, check for
2410 conflicts, trying different issue slots in the same row. */
2411 if (! (ps_i = add_node_to_ps (ps, n, c, must_precede, must_follow)))
2412 return NULL; /* Failed to insert the node at the given cycle. */
2413
2414 has_conflicts = ps_has_conflicts (ps, c, c)
2415 || (ps->history > 0
2416 && ps_has_conflicts (ps,
2417 c - ps->history,
2418 c + ps->history));
2419
2420 /* Try different issue slots to find one that the given node can be
2421 scheduled in without conflicts. */
2422 while (has_conflicts)
2423 {
2424 if (! ps_insn_advance_column (ps, ps_i, must_follow))
2425 break;
2426 has_conflicts = ps_has_conflicts (ps, c, c)
2427 || (ps->history > 0
2428 && ps_has_conflicts (ps,
2429 c - ps->history,
2430 c + ps->history));
2431 }
2432
2433 if (has_conflicts)
2434 {
2435 remove_node_from_ps (ps, ps_i);
2436 return NULL;
2437 }
2438
2439 ps->min_cycle = MIN (ps->min_cycle, c);
2440 ps->max_cycle = MAX (ps->max_cycle, c);
2441 return ps_i;
2442 }
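/* Typical call pattern from the scheduling driver, as a minimal sketch
   (the window bounds START/END/STEP are assumed to come from the
   already-scheduled neighbors of U_NODE):

     for (c = start; c != end; c += step)
       if (ps_add_node_check_conflicts (ps, u_node, c,
                                        must_precede, must_follow))
         {
           SCHED_TIME (u_node) = c;
           break;
         }

   If no cycle in the window succeeds, the driver bumps II and tries
   again.  */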
2443
2444 /* Rotate the rows of PS such that insns scheduled at time
2445 START_CYCLE will appear in row 0. Updates max_cycle and min_cycle. */
2446 void
2447 rotate_partial_schedule (partial_schedule_ptr ps, int start_cycle)
2448 {
2449 int i, row, backward_rotates;
2450 int last_row = ps->ii - 1;
2451
2452 if (start_cycle == 0)
2453 return;
2454
2455 backward_rotates = SMODULO (start_cycle, ps->ii);
2456
2457 /* Revisit later and optimize this into a single loop. */
2458 for (i = 0; i < backward_rotates; i++)
2459 {
2460 ps_insn_ptr first_row = ps->rows[0];
2461
2462 for (row = 0; row < last_row; row++)
2463 ps->rows[row] = ps->rows[row+1];
2464
2465 ps->rows[last_row] = first_row;
2466 }
2467
2468 ps->max_cycle -= start_cycle;
2469 ps->min_cycle -= start_cycle;
2470 }
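/* Rotation example (illustrative).  With II = 4 and rows
   [r0, r1, r2, r3], rotate_partial_schedule (ps, 6) performs
   SMODULO (6, 4) = 2 backward rotates, yielding [r2, r3, r0, r1]:
   the insns whose cycles were congruent to 6 modulo 4 (old row 2) now
   sit in row 0, and min/max_cycle are both shifted down by 6 so that
   absolute cycle numbers stay consistent with the new origin.  */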
2471
2472 /* Remove the node N from the partial schedule PS. Because we restart
2473 the DFA each time we want to check for resource conflicts, this is
2474 equivalent to unscheduling the node N. */
2475 static bool
2476 ps_unschedule_node (partial_schedule_ptr ps, ddg_node_ptr n)
2477 {
2478 ps_insn_ptr ps_i;
2479 int row = SMODULO (SCHED_TIME (n), ps->ii);
2480
2481 if (row < 0 || row >= ps->ii)
2482 return false;
2483
2484 for (ps_i = ps->rows[row];
2485 ps_i && ps_i->node != n;
2486 ps_i = ps_i->next_in_row);
2487 if (!ps_i)
2488 return false;
2489
2490 return remove_node_from_ps (ps, ps_i);
2491 }
2492 #endif /* INSN_SCHEDULING */
2493 \f
2494 static bool
2495 gate_handle_sms (void)
2496 {
2497 return (optimize > 0 && flag_modulo_sched);
2498 }
2499
2500
2501 /* Run the instruction scheduler: perform swing modulo scheduling (SMS). */
2503 static unsigned int
2504 rest_of_handle_sms (void)
2505 {
2506 #ifdef INSN_SCHEDULING
2507 basic_block bb;
2508
2509 /* We want to be able to create new pseudos. */
2510 no_new_pseudos = 0;
2511 /* Collect loop information to be used in SMS. */
2512 cfg_layout_initialize (CLEANUP_UPDATE_LIFE);
2513 sms_schedule ();
2514
2515 /* Update the life information, because we add pseudos. */
2516 max_regno = max_reg_num ();
2517 allocate_reg_info (max_regno, FALSE, FALSE);
2518 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
2519 (PROP_DEATH_NOTES
2520 | PROP_REG_INFO
2521 | PROP_KILL_DEAD_CODE
2522 | PROP_SCAN_DEAD_CODE));
2523
2524 no_new_pseudos = 1;
2525
2526 /* Finalize layout changes. */
2527 FOR_EACH_BB (bb)
2528 if (bb->next_bb != EXIT_BLOCK_PTR)
2529 bb->aux = bb->next_bb;
2530 cfg_layout_finalize ();
2531 free_dominance_info (CDI_DOMINATORS);
2532 #endif /* INSN_SCHEDULING */
2533 return 0;
2534 }
2535
2536 struct tree_opt_pass pass_sms =
2537 {
2538 "sms", /* name */
2539 gate_handle_sms, /* gate */
2540 rest_of_handle_sms, /* execute */
2541 NULL, /* sub */
2542 NULL, /* next */
2543 0, /* static_pass_number */
2544 TV_SMS, /* tv_id */
2545 0, /* properties_required */
2546 0, /* properties_provided */
2547 0, /* properties_destroyed */
2548 0, /* todo_flags_start */
2549 TODO_dump_func |
2550 TODO_ggc_collect, /* todo_flags_finish */
2551 'm' /* letter */
2552 };
2553