1 /* Conversion of SESE regions to Polyhedra.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@amd.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22
23 #ifdef HAVE_isl
24 /* Workaround for GMP 5.1.3 bug, see PR56019. */
25 #include <stddef.h>
26
27 #include <isl/constraint.h>
28 #include <isl/set.h>
29 #include <isl/map.h>
30 #include <isl/union_map.h>
31 #include <isl/constraint.h>
32 #include <isl/aff.h>
33 #include <isl/val.h>
34
35 /* Since ISL-0.13, the extern is in val_gmp.h. */
36 #if !defined(HAVE_ISL_SCHED_CONSTRAINTS_COMPUTE_SCHEDULE) && defined(__cplusplus)
37 extern "C" {
38 #endif
39 #include <isl/val_gmp.h>
40 #if !defined(HAVE_ISL_SCHED_CONSTRAINTS_COMPUTE_SCHEDULE) && defined(__cplusplus)
41 }
42 #endif
43
44 #include "system.h"
45 #include "coretypes.h"
46 #include "backend.h"
47 #include "cfghooks.h"
48 #include "tree.h"
49 #include "gimple.h"
50 #include "ssa.h"
51 #include "params.h"
52 #include "fold-const.h"
53 #include "gimple-iterator.h"
54 #include "gimplify.h"
55 #include "gimplify-me.h"
56 #include "tree-cfg.h"
57 #include "tree-ssa-loop-manip.h"
58 #include "tree-ssa-loop-niter.h"
59 #include "tree-ssa-loop.h"
60 #include "tree-into-ssa.h"
61 #include "tree-pass.h"
62 #include "cfgloop.h"
63 #include "tree-data-ref.h"
64 #include "tree-scalar-evolution.h"
65 #include "domwalk.h"
66 #include "graphite-poly.h"
67 #include "tree-ssa-propagate.h"
68 #include "graphite-sese-to-poly.h"
69
70 /* Assigns to RES the value of the INTEGER_CST T. */
71
72 static inline void
73 tree_int_to_gmp (tree t, mpz_t res)
74 {
75 wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
76 }
77
78 /* Returns the index of the PHI argument defined in the outermost
79 loop. */
80
81 static size_t
82 phi_arg_in_outermost_loop (gphi *phi)
83 {
84 loop_p loop = gimple_bb (phi)->loop_father;
85 size_t i, res = 0;
86
87 for (i = 0; i < gimple_phi_num_args (phi); i++)
88 if (!flow_bb_inside_loop_p (loop, gimple_phi_arg_edge (phi, i)->src))
89 {
90 loop = gimple_phi_arg_edge (phi, i)->src->loop_father;
91 res = i;
92 }
93
94 return res;
95 }
96
97 /* Removes a simple copy phi node "RES = phi (INIT, RES)" at position
98 PSI by inserting on the loop ENTRY edge the assignment "RES = INIT". */
99
100 static void
101 remove_simple_copy_phi (gphi_iterator *psi)
102 {
103 gphi *phi = psi->phi ();
104 tree res = gimple_phi_result (phi);
105 size_t entry = phi_arg_in_outermost_loop (phi);
106 tree init = gimple_phi_arg_def (phi, entry);
107 gassign *stmt = gimple_build_assign (res, init);
108 edge e = gimple_phi_arg_edge (phi, entry);
109
110 remove_phi_node (psi, false);
111 gsi_insert_on_edge_immediate (e, stmt);
112 }
113
114 /* Removes an invariant phi node at position PSI by inserting on the
115 loop ENTRY edge the assignment RES = INIT. */
116
117 static void
118 remove_invariant_phi (sese_l &region, gphi_iterator *psi)
119 {
120 gphi *phi = psi->phi ();
121 loop_p loop = loop_containing_stmt (phi);
122 tree res = gimple_phi_result (phi);
123 tree scev = scalar_evolution_in_region (region, loop, res);
124 size_t entry = phi_arg_in_outermost_loop (phi);
125 edge e = gimple_phi_arg_edge (phi, entry);
126 tree var;
127 gassign *stmt;
128 gimple_seq stmts = NULL;
129
130 if (tree_contains_chrecs (scev, NULL))
131 scev = gimple_phi_arg_def (phi, entry);
132
133 var = force_gimple_operand (scev, &stmts, true, NULL_TREE);
134 stmt = gimple_build_assign (res, var);
135 remove_phi_node (psi, false);
136
137 gimple_seq_add_stmt (&stmts, stmt);
138 gsi_insert_seq_on_edge (e, stmts);
139 gsi_commit_edge_inserts ();
140 SSA_NAME_DEF_STMT (res) = stmt;
141 }
142
143 /* Returns true when the phi node PHI is of the form "a = phi (a, x)". */
144
145 static inline bool
146 simple_copy_phi_p (gphi *phi)
147 {
148 if (gimple_phi_num_args (phi) != 2)
149 return false;
150
151 tree res = gimple_phi_result (phi);
152 return (res == gimple_phi_arg_def (phi, 0)
153 || res == gimple_phi_arg_def (phi, 1));
154 }
155
156 /* Returns true when the phi node at position PSI is a reduction phi
157 node in REGION. Otherwise moves the pointer PSI to the next phi to
158 be considered. */
159
160 static bool
161 reduction_phi_p (sese_l &region, gphi_iterator *psi)
162 {
163 loop_p loop;
164 gphi *phi = psi->phi ();
165 tree res = gimple_phi_result (phi);
166
167 loop = loop_containing_stmt (phi);
168
169 if (simple_copy_phi_p (phi))
170 {
171 /* PRE introduces phi nodes like these, for an example,
172 see id-5.f in the fortran graphite testsuite:
173
174 # prephitmp.85_265 = PHI <prephitmp.85_258(33), prephitmp.85_265(18)>
175 */
176 remove_simple_copy_phi (psi);
177 return false;
178 }
179
180 if (scev_analyzable_p (res, region))
181 {
182 tree scev = scalar_evolution_in_region (region, loop, res);
183
184 if (evolution_function_is_invariant_p (scev, loop->num))
185 remove_invariant_phi (region, psi);
186 else
187 gsi_next (psi);
188
189 return false;
190 }
191
192 /* All the other cases are considered reductions. */
193 return true;
194 }
195
196 /* Return an ISL identifier for the polyhedral basic block PBB. */
197
198 static isl_id *
199 isl_id_for_pbb (scop_p s, poly_bb_p pbb)
200 {
201 char name[10];
202 snprintf (name, sizeof (name), "S_%d", pbb_index (pbb));
203 return isl_id_alloc (s->isl_context, name, pbb);
204 }
205
206 /* Converts STATIC_SCHED into the scattering polyhedron of PBB.
207 We generate SCATTERING_DIMENSIONS scattering dimensions.
208
209 The scattering polyhedron consists of these dimensions: scattering,
210 loop_iterators, parameters.
211
212 Example:
213
214 | scattering_dimensions = 5
215 | nb_iterators = 1
216 | scop_nb_params = 2
217 |
218 | Schedule:
219 | i
220 | 4 5
221 |
222 | Scattering polyhedron:
223 |
224 | scattering: {s1, s2, s3, s4, s5}
225 | loop_iterators: {i}
226 | parameters: {p1, p2}
227 |
228 | s1 s2 s3 s4 s5 i p1 p2 1
229 | 1 0 0 0 0 0 0 0 -4 = 0
230 | 0 1 0 0 0 -1 0 0 0 = 0
231 | 0 0 1 0 0 0 0 0 -5 = 0 */
232
233 static void
234 build_pbb_scattering_polyhedrons (isl_aff *static_sched,
235 poly_bb_p pbb)
236 {
237 isl_val *val;
238
239 int scattering_dimensions = isl_set_dim (pbb->domain, isl_dim_set) * 2 + 1;
240
241 isl_space *dc = isl_set_get_space (pbb->domain);
242 isl_space *dm = isl_space_add_dims (isl_space_from_domain (dc),
243 isl_dim_out, scattering_dimensions);
244 pbb->schedule = isl_map_universe (dm);
245
246 for (int i = 0; i < scattering_dimensions; i++)
247 {
248 /* Textual order inside this loop. */
249 if ((i % 2) == 0)
250 {
251 isl_constraint *c = isl_equality_alloc
252 (isl_local_space_from_space (isl_map_get_space (pbb->schedule)));
253
254 val = isl_aff_get_coefficient_val (static_sched, isl_dim_in, i / 2);
255 gcc_assert (val && isl_val_is_int (val));
256
257 val = isl_val_neg (val);
258 c = isl_constraint_set_constant_val (c, val);
259 c = isl_constraint_set_coefficient_si (c, isl_dim_out, i, 1);
260 pbb->schedule = isl_map_add_constraint (pbb->schedule, c);
261 }
262
263 /* Iterations of this loop. */
264 else /* if ((i % 2) == 1) */
265 {
266 int loop = (i - 1) / 2;
267 pbb->schedule = isl_map_equate (pbb->schedule, isl_dim_in, loop,
268 isl_dim_out, i);
269 }
270 }
271
272 pbb->transformed = isl_map_copy (pbb->schedule);
273 }
274
275 /* Build for BB the static schedule.
276
277 The static schedule is a Dewey numbering of the abstract syntax
278 tree: http://en.wikipedia.org/wiki/Dewey_Decimal_Classification
279
280 The following example informally defines the static schedule:
281
282 A
283 for (i: ...)
284 {
285 for (j: ...)
286 {
287 B
288 C
289 }
290
291 for (k: ...)
292 {
293 D
294 E
295 }
296 }
297 F
298
299 Static schedules for A to F:
300
301 DEPTH
302 0 1 2
303 A 0
304 B 1 0 0
305 C 1 0 1
306 D 1 1 0
307 E 1 1 1
308 F 2
309 */
310
311 static void
312 build_scop_scattering (scop_p scop)
313 {
314 gimple_poly_bb_p previous_gbb = NULL;
315 isl_space *dc = isl_set_get_space (scop->param_context);
316 isl_aff *static_sched;
317
318 dc = isl_space_add_dims (dc, isl_dim_set, number_of_loops (cfun));
319 static_sched = isl_aff_zero_on_domain (isl_local_space_from_space (dc));
320
321 /* Schedules have to start at 0 on the first component. Since there
322 is no previous block to compute the number of common loops with for
323 the first pbb, its prefix will be equal to zero, and that index will
324 be incremented before copying; hence start the first component at -1. */
325 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in, 0, -1);
326
327 int i;
328 poly_bb_p pbb;
329 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
330 {
331 gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);
332 int prefix = 0;
333
334 if (previous_gbb)
335 prefix = nb_common_loops (scop->scop_info->region, previous_gbb, gbb);
336
337 previous_gbb = gbb;
338
339 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in,
340 prefix, 1);
341 build_pbb_scattering_polyhedrons (static_sched, pbb);
342 }
343
344 isl_aff_free (static_sched);
345 }
346
347 static isl_pw_aff *extract_affine (scop_p, tree, __isl_take isl_space *space);
348
349 /* Extract an affine expression from the chain of recurrence E. */
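/* For instance (an illustrative example, not taken from the sources):
   the chain of recurrence {2, +, 3}_1 is translated into the piecewise
   affine expression "2 + 3 * i", where "i" is the input dimension
   corresponding to the loop at depth 1 of the region. */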
350
351 static isl_pw_aff *
352 extract_affine_chrec (scop_p s, tree e, __isl_take isl_space *space)
353 {
354 isl_pw_aff *lhs = extract_affine (s, CHREC_LEFT (e), isl_space_copy (space));
355 isl_pw_aff *rhs = extract_affine (s, CHREC_RIGHT (e), isl_space_copy (space));
356 isl_local_space *ls = isl_local_space_from_space (space);
357 unsigned pos = sese_loop_depth (s->scop_info->region, get_chrec_loop (e)) - 1;
358 isl_aff *loop = isl_aff_set_coefficient_si
359 (isl_aff_zero_on_domain (ls), isl_dim_in, pos, 1);
360 isl_pw_aff *l = isl_pw_aff_from_aff (loop);
361
362 /* Before multiplying, make sure that the result is affine. */
363 gcc_assert (isl_pw_aff_is_cst (rhs)
364 || isl_pw_aff_is_cst (l));
365
366 return isl_pw_aff_add (lhs, isl_pw_aff_mul (rhs, l));
367 }
368
369 /* Extract an affine expression from the mult_expr E. */
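/* Note that at most one of the two operands may be non-constant: an
   expression like "4 * i" is translated, whereas "i * j" is rejected
   by returning NULL (illustrative examples). */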
370
371 static isl_pw_aff *
372 extract_affine_mul (scop_p s, tree e, __isl_take isl_space *space)
373 {
374 isl_pw_aff *lhs = extract_affine (s, TREE_OPERAND (e, 0),
375 isl_space_copy (space));
376 isl_pw_aff *rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
377
378 if (!isl_pw_aff_is_cst (lhs)
379 && !isl_pw_aff_is_cst (rhs))
380 {
381 isl_pw_aff_free (lhs);
382 isl_pw_aff_free (rhs);
383 return NULL;
384 }
385
386 return isl_pw_aff_mul (lhs, rhs);
387 }
388
389 /* Return an ISL identifier from the name of the ssa_name E. */
390
391 static isl_id *
392 isl_id_for_ssa_name (scop_p s, tree e)
393 {
394 const char *name = get_name (e);
395 isl_id *id;
396
397 if (name)
398 id = isl_id_alloc (s->isl_context, name, e);
399 else
400 {
401 char name1[10];
402 snprintf (name1, sizeof (name1), "P_%d", SSA_NAME_VERSION (e));
403 id = isl_id_alloc (s->isl_context, name1, e);
404 }
405
406 return id;
407 }
408
409 /* Return an ISL identifier for the data reference DR. */
410
411 static isl_id *
412 isl_id_for_dr (scop_p s, data_reference_p dr ATTRIBUTE_UNUSED)
413 {
414 /* Data references all get the same isl_id. They need to be comparable
415 and are distinguished through the first dimension, which contains the
416 alias set number. */
417 return isl_id_alloc (s->isl_context, "", 0);
418 }
419
420 /* Extract an affine expression from the ssa_name E. */
421
422 static isl_pw_aff *
423 extract_affine_name (scop_p s, tree e, __isl_take isl_space *space)
424 {
425 isl_id *id = isl_id_for_ssa_name (s, e);
426 int dimension = isl_space_find_dim_by_id (space, isl_dim_param, id);
427 isl_id_free (id);
428 isl_set *dom = isl_set_universe (isl_space_copy (space));
429 isl_aff *aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
430 aff = isl_aff_add_coefficient_si (aff, isl_dim_param, dimension, 1);
431 return isl_pw_aff_alloc (dom, aff);
432 }
433
434 /* Extract an affine expression from the gmp constant G. */
435
436 static isl_pw_aff *
437 extract_affine_gmp (mpz_t g, __isl_take isl_space *space)
438 {
439 isl_local_space *ls = isl_local_space_from_space (isl_space_copy (space));
440 isl_aff *aff = isl_aff_zero_on_domain (ls);
441 isl_set *dom = isl_set_universe (space);
442 isl_ctx *ct = isl_aff_get_ctx (aff);
443 isl_val *v = isl_val_int_from_gmp (ct, g);
444 aff = isl_aff_add_constant_val (aff, v);
445
446 return isl_pw_aff_alloc (dom, aff);
447 }
448
449 /* Extract an affine expression from the integer_cst E. */
450
451 static isl_pw_aff *
452 extract_affine_int (tree e, __isl_take isl_space *space)
453 {
454 mpz_t g;
455
456 mpz_init (g);
457 tree_int_to_gmp (e, g);
458 isl_pw_aff *res = extract_affine_gmp (g, space);
459 mpz_clear (g);
460
461 return res;
462 }
463
464 /* Compute pwaff mod 2^width. */
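/* For instance (illustrative), with WIDTH == 8 the expression "i" is
   wrapped to "i mod 256", modeling the modulo arithmetic of an
   unsigned 8-bit type. */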
465
466 static isl_pw_aff *
467 wrap (isl_pw_aff *pwaff, unsigned width)
468 {
469 isl_val *mod;
470
471 mod = isl_val_int_from_ui (isl_pw_aff_get_ctx (pwaff), width);
472 mod = isl_val_2exp (mod);
473 pwaff = isl_pw_aff_mod_val (pwaff, mod);
474
475 return pwaff;
476 }
477
478 /* When parameter NAME is in REGION, returns its index in SESE_PARAMS.
479 Otherwise returns -1. */
480
481 static inline int
482 parameter_index_in_region_1 (tree name, sese_info_p region)
483 {
484 int i;
485 tree p;
486
487 gcc_assert (TREE_CODE (name) == SSA_NAME);
488
489 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, p)
490 if (p == name)
491 return i;
492
493 return -1;
494 }
495
496 /* Extract an affine expression from the tree E in the scop S. */
497
498 static isl_pw_aff *
499 extract_affine (scop_p s, tree e, __isl_take isl_space *space)
500 {
501 isl_pw_aff *lhs, *rhs, *res;
502
503 if (e == chrec_dont_know) {
504 isl_space_free (space);
505 return NULL;
506 }
507
508 switch (TREE_CODE (e))
509 {
510 case POLYNOMIAL_CHREC:
511 res = extract_affine_chrec (s, e, space);
512 break;
513
514 case MULT_EXPR:
515 res = extract_affine_mul (s, e, space);
516 break;
517
518 case PLUS_EXPR:
519 case POINTER_PLUS_EXPR:
520 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
521 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
522 res = isl_pw_aff_add (lhs, rhs);
523 break;
524
525 case MINUS_EXPR:
526 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
527 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
528 res = isl_pw_aff_sub (lhs, rhs);
529 break;
530
531 case NEGATE_EXPR:
532 case BIT_NOT_EXPR:
533 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
534 rhs = extract_affine (s, integer_minus_one_node, space);
535 res = isl_pw_aff_mul (lhs, rhs);
536 break;
537
538 case SSA_NAME:
539 gcc_assert (-1 != parameter_index_in_region_1 (e, s->scop_info)
540 || !invariant_in_sese_p_rec (e, s->scop_info->region, NULL));
541 res = extract_affine_name (s, e, space);
542 break;
543
544 case INTEGER_CST:
545 res = extract_affine_int (e, space);
546 /* No need to wrap a single integer. */
547 return res;
548
549 CASE_CONVERT:
550 case NON_LVALUE_EXPR:
551 res = extract_affine (s, TREE_OPERAND (e, 0), space);
552 break;
553
554 default:
555 gcc_unreachable ();
556 break;
557 }
558
559 tree type = TREE_TYPE (e);
560 if (TYPE_UNSIGNED (type))
561 res = wrap (res, TYPE_PRECISION (type));
562
563 return res;
564 }
565
566 /* Assign dimension for each parameter in SCOP. */
567
568 static void
569 set_scop_parameter_dim (scop_p scop)
570 {
571 sese_info_p region = scop->scop_info;
572 unsigned nbp = sese_nb_params (region);
573 isl_space *space = isl_space_set_alloc (scop->isl_context, nbp, 0);
574
575 unsigned i;
576 tree e;
577 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, e)
578 space = isl_space_set_dim_id (space, isl_dim_param, i,
579 isl_id_for_ssa_name (scop, e));
580
581 scop->param_context = isl_set_universe (space);
582 }
583
584 /* Builds the constraint polyhedra for LOOP in SCOP. OUTER gives the
585 constraints of the surrounding loops; the result goes into DOMS. */
586
587 static void
588 build_loop_iteration_domains (scop_p scop, struct loop *loop,
589 int nb,
590 isl_set *outer, isl_set **doms)
591 {
592
593 tree nb_iters = number_of_latch_executions (loop);
594 sese_l region = scop->scop_info->region;
595 gcc_assert (loop_in_sese_p (loop, region));
596
597 isl_set *inner = isl_set_copy (outer);
598 int pos = isl_set_dim (outer, isl_dim_set);
599 isl_val *v;
600 mpz_t g;
601
602 mpz_init (g);
603
604 inner = isl_set_add_dims (inner, isl_dim_set, 1);
605 isl_space *space = isl_set_get_space (inner);
606
607 /* 0 <= loop_i */
608 isl_constraint *c = isl_inequality_alloc
609 (isl_local_space_from_space (isl_space_copy (space)));
610 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, 1);
611 inner = isl_set_add_constraint (inner, c);
612
613 /* loop_i <= cst_nb_iters */
614 if (TREE_CODE (nb_iters) == INTEGER_CST)
615 {
616 c = isl_inequality_alloc
617 (isl_local_space_from_space (isl_space_copy (space)));
618 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
619 tree_int_to_gmp (nb_iters, g);
620 v = isl_val_int_from_gmp (scop->isl_context, g);
621 c = isl_constraint_set_constant_val (c, v);
622 inner = isl_set_add_constraint (inner, c);
623 }
624
625 /* loop_i <= expr_nb_iters */
626 else if (!chrec_contains_undetermined (nb_iters))
627 {
628 isl_pw_aff *aff;
629
630 nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
631
632 aff = extract_affine (scop, nb_iters, isl_set_get_space (inner));
633 isl_set *valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (aff));
634 valid = isl_set_project_out (valid, isl_dim_set, 0,
635 isl_set_dim (valid, isl_dim_set));
636 scop->param_context = isl_set_intersect (scop->param_context, valid);
637
638 isl_local_space *ls = isl_local_space_from_space (isl_space_copy (space));
639 isl_aff *al = isl_aff_set_coefficient_si (isl_aff_zero_on_domain (ls),
640 isl_dim_in, pos, 1);
641 isl_set *le = isl_pw_aff_le_set (isl_pw_aff_from_aff (al),
642 isl_pw_aff_copy (aff));
643 inner = isl_set_intersect (inner, le);
644
645 widest_int nit;
646 if (max_stmt_executions (loop, &nit))
647 {
648 /* Insert in the context the constraints from the
649 estimation of the number of iterations NIT and the
650 symbolic number of iterations (involving parameter
651 names) NB_ITERS. First, build the affine expression
652 "NIT - NB_ITERS" and then say that it is positive,
653 i.e., NIT approximates NB_ITERS: "NIT >= NB_ITERS". */
654 mpz_t g;
655 mpz_init (g);
656 wi::to_mpz (nit, g, SIGNED);
657 mpz_sub_ui (g, g, 1);
658
659 isl_pw_aff *approx
660 = extract_affine_gmp (g, isl_set_get_space (inner));
661 isl_set *x = isl_pw_aff_ge_set (approx, aff);
662 x = isl_set_project_out (x, isl_dim_set, 0,
663 isl_set_dim (x, isl_dim_set));
664 scop->param_context = isl_set_intersect (scop->param_context, x);
665
666 isl_constraint *c = isl_inequality_alloc
667 (isl_local_space_from_space (isl_space_copy (space)));
668 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
669 v = isl_val_int_from_gmp (scop->isl_context, g);
670 mpz_clear (g);
671 c = isl_constraint_set_constant_val (c, v);
672 inner = isl_set_add_constraint (inner, c);
673 }
674 else
675 isl_pw_aff_free (aff);
676 }
677 else
678 gcc_unreachable ();
679
680 if (loop->inner)
681 build_loop_iteration_domains (scop, loop->inner, nb + 1,
682 isl_set_copy (inner), doms);
683
684 if (nb != 0
685 && loop->next
686 && loop_in_sese_p (loop->next, region))
687 build_loop_iteration_domains (scop, loop->next, nb,
688 isl_set_copy (outer), doms);
689
690 doms[loop->num] = inner;
691
692 isl_set_free (outer);
693 isl_space_free (space);
694 mpz_clear (g);
695 }
696
697 /* Returns a linear expression for tree T evaluated in PBB. */
698
699 static isl_pw_aff *
700 create_pw_aff_from_tree (poly_bb_p pbb, tree t)
701 {
702 scop_p scop = PBB_SCOP (pbb);
703
704 t = scalar_evolution_in_region (scop->scop_info->region, pbb_loop (pbb), t);
705 gcc_assert (!automatically_generated_chrec_p (t));
706
707 return extract_affine (scop, t, isl_set_get_space (pbb->domain));
708 }
709
710 /* Add the conditional statement STMT to PBB. CODE is used as the
711 comparison operator. This allows us to invert the condition or to
712 handle inequalities. */
713
714 static void
715 add_condition_to_pbb (poly_bb_p pbb, gcond *stmt, enum tree_code code)
716 {
717 isl_pw_aff *lhs = create_pw_aff_from_tree (pbb, gimple_cond_lhs (stmt));
718 isl_pw_aff *rhs = create_pw_aff_from_tree (pbb, gimple_cond_rhs (stmt));
719 isl_set *cond;
720
721 switch (code)
722 {
723 case LT_EXPR:
724 cond = isl_pw_aff_lt_set (lhs, rhs);
725 break;
726
727 case GT_EXPR:
728 cond = isl_pw_aff_gt_set (lhs, rhs);
729 break;
730
731 case LE_EXPR:
732 cond = isl_pw_aff_le_set (lhs, rhs);
733 break;
734
735 case GE_EXPR:
736 cond = isl_pw_aff_ge_set (lhs, rhs);
737 break;
738
739 case EQ_EXPR:
740 cond = isl_pw_aff_eq_set (lhs, rhs);
741 break;
742
743 case NE_EXPR:
744 cond = isl_pw_aff_ne_set (lhs, rhs);
745 break;
746
747 default:
748 isl_pw_aff_free (lhs);
749 isl_pw_aff_free (rhs);
750 return;
751 }
752
753 cond = isl_set_coalesce (cond);
754 cond = isl_set_set_tuple_id (cond, isl_set_get_tuple_id (pbb->domain));
755 pbb->domain = isl_set_intersect (pbb->domain, cond);
756 }
757
758 /* Add conditions to the domain of PBB. */
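/* For instance (illustrative), when PBB is guarded by the else branch
   of "if (i < n)", the inverted condition "i >= n" is intersected with
   the domain of PBB. */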
759
760 static void
761 add_conditions_to_domain (poly_bb_p pbb)
762 {
763 unsigned int i;
764 gimple *stmt;
765 gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);
766
767 if (GBB_CONDITIONS (gbb).is_empty ())
768 return;
769
770 FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
771 switch (gimple_code (stmt))
772 {
773 case GIMPLE_COND:
774 {
775 /* Don't constrain on anything else than INTEGER_TYPE. */
776 if (TREE_CODE (TREE_TYPE (gimple_cond_lhs (stmt))) != INTEGER_TYPE)
777 break;
778
779 gcond *cond_stmt = as_a <gcond *> (stmt);
780 enum tree_code code = gimple_cond_code (cond_stmt);
781
782 /* The conditions for ELSE-branches are inverted. */
783 if (!GBB_CONDITION_CASES (gbb)[i])
784 code = invert_tree_comparison (code, false);
785
786 add_condition_to_pbb (pbb, cond_stmt, code);
787 break;
788 }
789
790 case GIMPLE_SWITCH:
791 /* Switch statements are not supported right now - fall through. */
792
793 default:
794 gcc_unreachable ();
795 break;
796 }
797 }
798
799 /* Traverses all the GBBs of the SCOP and adds their constraints to the
800 iteration domains. */
801
802 static void
803 add_conditions_to_constraints (scop_p scop)
804 {
805 int i;
806 poly_bb_p pbb;
807
808 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
809 add_conditions_to_domain (pbb);
810 }
811
812 /* Add constraints on the possible values of parameter P from the type
813 of P. */
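/* For instance (illustrative), a parameter P of type unsigned char
   adds the constraints "0 <= P" and "P <= 255" to the context. */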
814
815 static void
816 add_param_constraints (scop_p scop, graphite_dim_t p)
817 {
818 tree parameter = SESE_PARAMS (scop->scop_info)[p];
819 tree type = TREE_TYPE (parameter);
820 tree lb = NULL_TREE;
821 tree ub = NULL_TREE;
822
823 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
824 lb = lower_bound_in_type (type, type);
825 else
826 lb = TYPE_MIN_VALUE (type);
827
828 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
829 ub = upper_bound_in_type (type, type);
830 else
831 ub = TYPE_MAX_VALUE (type);
832
833 if (lb)
834 {
835 isl_space *space = isl_set_get_space (scop->param_context);
836 isl_constraint *c;
837 mpz_t g;
838 isl_val *v;
839
840 c = isl_inequality_alloc (isl_local_space_from_space (space));
841 mpz_init (g);
842 tree_int_to_gmp (lb, g);
843 v = isl_val_int_from_gmp (scop->isl_context, g);
844 v = isl_val_neg (v);
845 mpz_clear (g);
846 c = isl_constraint_set_constant_val (c, v);
847 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, 1);
848
849 scop->param_context = isl_set_add_constraint (scop->param_context, c);
850 }
851
852 if (ub)
853 {
854 isl_space *space = isl_set_get_space (scop->param_context);
855 isl_constraint *c;
856 mpz_t g;
857 isl_val *v;
858
859 c = isl_inequality_alloc (isl_local_space_from_space (space));
860
861 mpz_init (g);
862 tree_int_to_gmp (ub, g);
863 v = isl_val_int_from_gmp (scop->isl_context, g);
864 mpz_clear (g);
865 c = isl_constraint_set_constant_val (c, v);
866 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, -1);
867
868 scop->param_context = isl_set_add_constraint (scop->param_context, c);
869 }
870 }
871
872 /* Build the context of the SCOP. The context usually contains extra
873 constraints on the parameters, derived from their types, that
874 complement the iteration domains. */
875
876 static void
877 build_scop_context (scop_p scop)
878 {
879 graphite_dim_t p, n = scop_nb_params (scop);
880
881 for (p = 0; p < n; p++)
882 add_param_constraints (scop, p);
883 }
884
885 /* Build the iteration domains: for each basic block of the current
886 SCOP, the domain describes the iterations of the surrounding loops
887 for which the basic block is executed. */
888
889 static void
890 build_scop_iteration_domain (scop_p scop)
891 {
892 sese_info_p region = scop->scop_info;
893 int nb_loops = number_of_loops (cfun);
894 isl_set **doms = XCNEWVEC (isl_set *, nb_loops);
895
896 int i;
897 struct loop *loop;
898 FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
899 if (!loop_in_sese_p (loop_outer (loop), region->region))
900 build_loop_iteration_domains (scop, loop, 0,
901 isl_set_copy (scop->param_context), doms);
902
903 poly_bb_p pbb;
904 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
905 {
906 loop = pbb_loop (pbb);
907
908 if (doms[loop->num])
909 pbb->domain = isl_set_copy (doms[loop->num]);
910 else
911 pbb->domain = isl_set_copy (scop->param_context);
912
913 pbb->domain = isl_set_set_tuple_id (pbb->domain,
914 isl_id_for_pbb (scop, pbb));
915 }
916
917 for (int i = 0; i < nb_loops; i++)
918 if (doms[i])
919 isl_set_free (doms[i]);
920
921 free (doms);
922 }
923
924 /* Add a constraint to the access map ACC for the alias set of the
925 data reference described by DRI: the first output dimension of ACC
926 is fixed to the alias set number DRI.ALIAS_SET assigned to the
927 data reference. */
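/* For instance (illustrative), a data reference with alias set 1 gets
   the equality "out_0 - 1 = 0" on its first output dimension. */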
928
929 static isl_map *
930 pdr_add_alias_set (isl_map *acc, dr_info &dri)
931 {
932 isl_constraint *c = isl_equality_alloc
933 (isl_local_space_from_space (isl_map_get_space (acc)));
934 c = isl_constraint_set_constant_si (c, -dri.alias_set);
935 c = isl_constraint_set_coefficient_si (c, isl_dim_out, 0, 1);
936
937 return isl_map_add_constraint (acc, c);
938 }
939
940 /* Assign the affine expression INDEX to the output dimension POS of
941 MAP and return the result. */
942
943 static isl_map *
944 set_index (isl_map *map, int pos, isl_pw_aff *index)
945 {
946 isl_map *index_map;
947 int len = isl_map_dim (map, isl_dim_out);
948 isl_id *id;
949
950 index_map = isl_map_from_pw_aff (index);
951 index_map = isl_map_insert_dims (index_map, isl_dim_out, 0, pos);
952 index_map = isl_map_add_dims (index_map, isl_dim_out, len - pos - 1);
953
954 id = isl_map_get_tuple_id (map, isl_dim_out);
955 index_map = isl_map_set_tuple_id (index_map, isl_dim_out, id);
956 id = isl_map_get_tuple_id (map, isl_dim_in);
957 index_map = isl_map_set_tuple_id (index_map, isl_dim_in, id);
958
959 return isl_map_intersect (map, index_map);
960 }
961
962 /* Add to the access map ACC equalities defining the access functions
963 to memory for the data reference described by DRI: each subscript of
964 DRI.DR is translated to an affine expression and assigned to the
965 corresponding output dimension, after the alias set dimension. */
966
967 static isl_map *
968 pdr_add_memory_accesses (isl_map *acc, dr_info &dri)
969 {
970 data_reference_p dr = dri.dr;
971 poly_bb_p pbb = dri.pbb;
972 int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
973 scop_p scop = PBB_SCOP (pbb);
974
975 for (i = 0; i < nb_subscripts; i++)
976 {
977 isl_pw_aff *aff;
978 tree afn = DR_ACCESS_FN (dr, nb_subscripts - 1 - i);
979
980 aff = extract_affine (scop, afn,
981 isl_space_domain (isl_map_get_space (acc)));
982 acc = set_index (acc, i + 1, aff);
983 }
984
985 return acc;
986 }
987
988 /* Add to SUBSCRIPT_SIZES constraints representing the size of the
989 data accessed by DR in SCOP: for every ARRAY_REF subscript with
990 known lower and upper bounds, the corresponding dimension is
991 constrained to lie within those bounds. */
992
993 static isl_set *
994 pdr_add_data_dimensions (isl_set *subscript_sizes, scop_p scop,
995 data_reference_p dr)
996 {
997 tree ref = DR_REF (dr);
998
999 int nb_subscripts = DR_NUM_DIMENSIONS (dr);
1000 for (int i = nb_subscripts - 1; i >= 0; i--, ref = TREE_OPERAND (ref, 0))
1001 {
1002 if (TREE_CODE (ref) != ARRAY_REF)
1003 return subscript_sizes;
1004
1005 tree low = array_ref_low_bound (ref);
1006 tree high = array_ref_up_bound (ref);
1007
1008 /* XXX The PPL code dealt separately with
1009 subscript - low >= 0 and high - subscript >= 0 in case one of
1010 the two bounds isn't known. Do the same here? */
1011
1012 if (tree_fits_shwi_p (low)
1013 && high
1014 && tree_fits_shwi_p (high)
1015 /* 1-element arrays at end of structures may extend over
1016 their declared size. */
1017 && !(array_at_struct_end_p (ref)
1018 && operand_equal_p (low, high, 0)))
1019 {
1020 isl_id *id;
1021 isl_aff *aff;
1022 isl_set *univ, *lbs, *ubs;
1023 isl_pw_aff *index;
1024 isl_set *valid;
1025 isl_space *space = isl_set_get_space (subscript_sizes);
1026 isl_pw_aff *lb = extract_affine_int (low, isl_space_copy (space));
1027 isl_pw_aff *ub = extract_affine_int (high, isl_space_copy (space));
1028
1029 /* high >= 0 */
1030 valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (ub));
1031 valid = isl_set_project_out (valid, isl_dim_set, 0,
1032 isl_set_dim (valid, isl_dim_set));
1033 scop->param_context = isl_set_intersect (scop->param_context, valid);
1034
1035 aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
1036 aff = isl_aff_add_coefficient_si (aff, isl_dim_in, i + 1, 1);
1037 univ = isl_set_universe (isl_space_domain (isl_aff_get_space (aff)));
1038 index = isl_pw_aff_alloc (univ, aff);
1039
1040 id = isl_set_get_tuple_id (subscript_sizes);
1041 lb = isl_pw_aff_set_tuple_id (lb, isl_dim_in, isl_id_copy (id));
1042 ub = isl_pw_aff_set_tuple_id (ub, isl_dim_in, id);
1043
1044 /* low <= sub_i <= high */
1045 lbs = isl_pw_aff_ge_set (isl_pw_aff_copy (index), lb);
1046 ubs = isl_pw_aff_le_set (index, ub);
1047 subscript_sizes = isl_set_intersect (subscript_sizes, lbs);
1048 subscript_sizes = isl_set_intersect (subscript_sizes, ubs);
1049 }
1050 }
1051
1052 return subscript_sizes;
1053 }
1054
1055 /* Build data accesses for DR in PBB. */
1056
1057 static void
1058 build_poly_dr (dr_info &dri)
1059 {
1060 isl_map *acc;
1061 isl_set *subscript_sizes;
1062 poly_bb_p pbb = dri.pbb;
1063 data_reference_p dr = dri.dr;
1064 scop_p scop = PBB_SCOP (pbb);
1065
1066 {
1067 isl_space *dc = isl_set_get_space (pbb->domain);
1068 int nb_out = 1 + DR_NUM_DIMENSIONS (dr);
1069 isl_space *space = isl_space_add_dims (isl_space_from_domain (dc),
1070 isl_dim_out, nb_out);
1071
1072 acc = isl_map_universe (space);
1073 acc = isl_map_set_tuple_id (acc, isl_dim_out, isl_id_for_dr (scop, dr));
1074 }
1075
1076 acc = pdr_add_alias_set (acc, dri);
1077 acc = pdr_add_memory_accesses (acc, dri);
1078
1079 {
1080 isl_id *id = isl_id_for_dr (scop, dr);
1081 int nb = 1 + DR_NUM_DIMENSIONS (dr);
1082 isl_space *space = isl_space_set_alloc (scop->isl_context, 0, nb);
1083
1084 space = isl_space_set_tuple_id (space, isl_dim_set, id);
1085 subscript_sizes = isl_set_nat_universe (space);
1086 subscript_sizes = isl_set_fix_si (subscript_sizes, isl_dim_set, 0,
1087 dri.alias_set);
1088 subscript_sizes = pdr_add_data_dimensions (subscript_sizes, scop, dr);
1089 }
1090
1091 new_poly_dr (pbb,
1092 DR_IS_READ (dr) ? PDR_READ : PDR_WRITE,
1093 dr, DR_NUM_DIMENSIONS (dr), acc, subscript_sizes);
1094 }
1095
1096 /* Compute alias-sets for all data references in DRS. */
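/* Two data references are placed in the same alias set when they
   belong to the same connected component of the may-alias graph built
   below; alias set numbers start at 1. */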
1097
1098 static void
1099 build_alias_set (scop_p scop)
1100 {
1101 int num_vertices = scop->drs.length ();
1102 struct graph *g = new_graph (num_vertices);
1103 dr_info *dr1, *dr2;
1104 int i, j;
1105 int *all_vertices;
1106
1107 FOR_EACH_VEC_ELT (scop->drs, i, dr1)
1108 for (j = i+1; scop->drs.iterate (j, &dr2); j++)
1109 if (dr_may_alias_p (dr1->dr, dr2->dr, true))
1110 {
1111 add_edge (g, i, j);
1112 add_edge (g, j, i);
1113 }
1114
1115 all_vertices = XNEWVEC (int, num_vertices);
1116 for (i = 0; i < num_vertices; i++)
1117 all_vertices[i] = i;
1118
1119 graphds_dfs (g, all_vertices, num_vertices, NULL, true, NULL);
1120 free (all_vertices);
1121
1122 for (i = 0; i < g->n_vertices; i++)
1123 scop->drs[i].alias_set = g->vertices[i].component + 1;
1124
1125 free_graph (g);
1126 }
1127
1128 /* Build data references in SCOP. */
1129
1130 static void
1131 build_scop_drs (scop_p scop)
1132 {
1133 int i, j;
1134 poly_bb_p pbb;
1135
1136 /* Remove all the PBBs that do not have data references: these basic
1137 blocks are not handled in the polyhedral representation. */
1138 for (i = 0; scop->pbbs.iterate (i, &pbb); i++)
1139 if (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).is_empty ())
1140 {
1141 free_gimple_poly_bb (PBB_BLACK_BOX (pbb));
1142 free_poly_bb (pbb);
1143 scop->pbbs.ordered_remove (i);
1144 i--;
1145 }
1146
1147 data_reference_p dr;
1148 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
1149 if (pbb)
1150 FOR_EACH_VEC_ELT (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)), j, dr)
1151 scop->drs.safe_push (dr_info (dr, pbb));
1152
1153 build_alias_set (scop);
1154
1155 dr_info *dri;
1156 FOR_EACH_VEC_ELT (scop->drs, i, dri)
1157 build_poly_dr (*dri);
1158 }
1159
1160 /* Analyze all the data references of STMTS and add them to the
1161 GBB_DATA_REFS vector of BB. */
1162
1163 static void
1164 analyze_drs_in_stmts (scop_p scop, basic_block bb, vec<gimple *> stmts)
1165 {
1166 sese_l region = scop->scop_info->region;
1167 if (!bb_in_sese_p (bb, region))
1168 return;
1169
1170 loop_p nest = outermost_loop_in_sese (region, bb);
1171 loop_p loop = bb->loop_father;
1172 if (!loop_in_sese_p (loop, region))
1173 loop = nest;
1174
1175 gimple_poly_bb_p gbb = gbb_from_bb (bb);
1176
1177 gimple *stmt;
1178 int i;
1179 FOR_EACH_VEC_ELT (stmts, i, stmt)
1180 {
1181 if (is_gimple_debug (stmt))
1182 continue;
1183
1184 graphite_find_data_references_in_stmt (nest, loop, stmt,
1185 &GBB_DATA_REFS (gbb));
1186 }
1187 }
1188
1189 /* Append STMT to the sequence STMTS, insert the whole sequence
1190 before INSERT_GSI, and then analyze the data references of the
1191 inserted statements with analyze_drs_in_stmts. */
1192
1193 static void
1194 insert_stmts (scop_p scop, gimple *stmt, gimple_seq stmts,
1195 gimple_stmt_iterator insert_gsi)
1196 {
1197 gimple_stmt_iterator gsi;
1198 auto_vec<gimple *, 3> x;
1199
1200 gimple_seq_add_stmt (&stmts, stmt);
1201 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
1202 x.safe_push (gsi_stmt (gsi));
1203
1204 gsi_insert_seq_before (&insert_gsi, stmts, GSI_SAME_STMT);
1205 analyze_drs_in_stmts (scop, gsi_bb (insert_gsi), x);
1206 }
1207
1208 /* Insert the assignment "RES := EXPR" just after AFTER_STMT. */
1209
1210 static void
1211 insert_out_of_ssa_copy (scop_p scop, tree res, tree expr, gimple *after_stmt)
1212 {
1213 gimple_stmt_iterator gsi;
1214 auto_vec<gimple *, 3> x;
1215 gimple_seq stmts;
1216 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
1217 gassign *stmt = gimple_build_assign (unshare_expr (res), var);
1218
1219 gimple_seq_add_stmt (&stmts, stmt);
1220
1221 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
1222 x.safe_push (gsi_stmt (gsi));
1223
1224 if (gimple_code (after_stmt) == GIMPLE_PHI)
1225 {
1226 gsi = gsi_after_labels (gimple_bb (after_stmt));
1227 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1228 }
1229 else
1230 {
1231 gsi = gsi_for_stmt (after_stmt);
1232 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
1233 }
1234
1235 analyze_drs_in_stmts (scop, gimple_bb (after_stmt), x);
1236 }
1237
1238 /* Creates a poly_bb_p for basic_block BB from the existing PBB. */
1239
1240 static void
1241 new_pbb_from_pbb (scop_p scop, poly_bb_p pbb, basic_block bb)
1242 {
1243 vec<data_reference_p> drs;
1244 drs.create (3);
1245 gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);
1246 gimple_poly_bb_p gbb1 = new_gimple_poly_bb (bb, drs);
1247 poly_bb_p pbb1 = new_poly_bb (scop, gbb1);
1248 int index, n = scop->pbbs.length ();
1249
1250 for (index = 0; index < n; index++)
1251 if (scop->pbbs[index] == pbb)
1252 break;
1253
1254 pbb1->domain = isl_set_copy (pbb->domain);
1255 pbb1->domain = isl_set_set_tuple_id (pbb1->domain,
1256 isl_id_for_pbb (scop, pbb1));
1257
1258 GBB_PBB (gbb1) = pbb1;
1259 GBB_CONDITIONS (gbb1) = GBB_CONDITIONS (gbb).copy ();
1260 GBB_CONDITION_CASES (gbb1) = GBB_CONDITION_CASES (gbb).copy ();
1261 scop->pbbs.safe_insert (index + 1, pbb1);
1262 }
1263
1264 /* Insert on edge E the assignment "RES := EXPR". */
1265
1266 static void
1267 insert_out_of_ssa_copy_on_edge (scop_p scop, edge e, tree res, tree expr)
1268 {
1269 gimple_seq stmts = NULL;
1270 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
1271 gimple *stmt = gimple_build_assign (unshare_expr (res), var);
1272 auto_vec<gimple *, 3> x;
1273
1274 gimple_seq_add_stmt (&stmts, stmt);
1275 gimple_stmt_iterator gsi;
1276 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
1277 x.safe_push (gsi_stmt (gsi));
1278
1279 gsi_insert_seq_on_edge (e, stmts);
1280 gsi_commit_edge_inserts ();
1281 basic_block bb = gimple_bb (stmt);
1282
1283 if (!bb_in_sese_p (bb, scop->scop_info->region))
1284 return;
1285
1286 if (!gbb_from_bb (bb))
1287 new_pbb_from_pbb (scop, pbb_from_bb (e->src), bb);
1288
1289 analyze_drs_in_stmts (scop, bb, x);
1290 }
1291
1292 /* Creates a zero dimension array of the same type as VAR. */
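/* E.g. (illustrative), for an "int" variable this creates a one
   element temporary array and returns an ARRAY_REF "tmp[0]" into it,
   where "tmp" stands for the newly created base variable named after
   BASE_NAME. */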
1293
1294 static tree
1295 create_zero_dim_array (tree var, const char *base_name)
1296 {
1297 tree index_type = build_index_type (integer_zero_node);
1298 tree elt_type = TREE_TYPE (var);
1299 tree array_type = build_array_type (elt_type, index_type);
1300 tree base = create_tmp_var (array_type, base_name);
1301
1302 return build4 (ARRAY_REF, elt_type, base, integer_zero_node, NULL_TREE,
1303 NULL_TREE);
1304 }
1305
1306 /* Returns true when PHI is a loop close phi node. */
1307
1308 static bool
1309 scalar_close_phi_node_p (gimple *phi)
1310 {
1311 if (gimple_code (phi) != GIMPLE_PHI
1312 || virtual_operand_p (gimple_phi_result (phi)))
1313 return false;
1314
1315 /* Note that loop close phi nodes should have a single argument
1316 because we translated the representation into a canonical form
1317 before Graphite: see canonicalize_loop_closed_ssa_form. */
1318 return (gimple_phi_num_args (phi) == 1);
1319 }
1320
1321 /* For a definition DEF in REGION, propagates the expression EXPR in
1322 all the uses of DEF outside REGION. */
1323
1324 static void
1325 propagate_expr_outside_region (tree def, tree expr, sese_l &region)
1326 {
1327 gimple_seq stmts;
1328 bool replaced_once = false;
1329
1330 gcc_assert (TREE_CODE (def) == SSA_NAME);
1331
1332 expr = force_gimple_operand (unshare_expr (expr), &stmts, true,
1333 NULL_TREE);
1334
1335 imm_use_iterator imm_iter;
1336 gimple *use_stmt;
1337 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
1338 if (!is_gimple_debug (use_stmt)
1339 && !bb_in_sese_p (gimple_bb (use_stmt), region))
1340 {
1341 ssa_op_iter iter;
1342 use_operand_p use_p;
1343
1344 FOR_EACH_PHI_OR_STMT_USE (use_p, use_stmt, iter, SSA_OP_ALL_USES)
1345 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0)
1346 && (replaced_once = true))
1347 replace_exp (use_p, expr);
1348
1349 update_stmt (use_stmt);
1350 }
1351
1352 if (replaced_once)
1353 {
1354 gsi_insert_seq_on_edge (region.entry, stmts);
1355 gsi_commit_edge_inserts ();
1356 }
1357 }
1358
1359 /* Rewrite out of SSA the loop close phi node at PSI, creating a zero
1360 dimension array for it when necessary. */
1361
1362 static void
1363 rewrite_close_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
1364 {
1365 sese_l region = scop->scop_info->region;
1366 gimple *phi = gsi_stmt (*psi);
1367 tree res = gimple_phi_result (phi);
1368 basic_block bb = gimple_bb (phi);
1369 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1370 tree arg = gimple_phi_arg_def (phi, 0);
1371 gimple *stmt;
1372
1373 /* Note that loop close phi nodes should have a single argument
1374 because we translated the representation into a canonical form
1375 before Graphite: see canonicalize_loop_closed_ssa_form. */
1376 gcc_assert (gimple_phi_num_args (phi) == 1);
1377
1378 /* The phi node can be a non close phi node, when its argument is
1379 invariant, or a default definition. */
1380 if (is_gimple_min_invariant (arg)
1381 || SSA_NAME_IS_DEFAULT_DEF (arg))
1382 {
1383 propagate_expr_outside_region (res, arg, region);
1384 gsi_next (psi);
1385 return;
1386 }
1387
1388 else if (gimple_bb (SSA_NAME_DEF_STMT (arg))->loop_father == bb->loop_father)
1389 {
1390 propagate_expr_outside_region (res, arg, region);
1391 stmt = gimple_build_assign (res, arg);
1392 remove_phi_node (psi, false);
1393 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
1394 return;
1395 }
1396
1397 /* If res is scev analyzable and is not a scalar value, it is safe
1398 to ignore the close phi node: it will be code generated in the
1399 out of Graphite pass. */
1400 else if (scev_analyzable_p (res, region))
1401 {
1402 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (res));
1403 tree scev;
1404
1405 if (!loop_in_sese_p (loop, region))
1406 {
1407 loop = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
1408 scev = scalar_evolution_in_region (region, loop, arg);
1409 scev = compute_overall_effect_of_inner_loop (loop, scev);
1410 }
1411 else
1412 scev = scalar_evolution_in_region (region, loop, res);
1413
1414 if (tree_does_not_contain_chrecs (scev))
1415 propagate_expr_outside_region (res, scev, region);
1416
1417 gsi_next (psi);
1418 return;
1419 }
1420 else
1421 {
1422 tree zero_dim_array = create_zero_dim_array (res, "Close_Phi");
1423
1424 stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
1425
1426 if (TREE_CODE (arg) == SSA_NAME)
1427 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
1428 SSA_NAME_DEF_STMT (arg));
1429 else
1430 insert_out_of_ssa_copy_on_edge (scop, single_pred_edge (bb),
1431 zero_dim_array, arg);
1432 }
1433
1434 remove_phi_node (psi, false);
1435 SSA_NAME_DEF_STMT (res) = stmt;
1436
1437 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
1438 }
1439
1440 /* Rewrite out of SSA the reduction phi node at PSI by creating a zero
1441 dimension array for it. */
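/* For instance (illustrative), "res_1 = PHI <a_2(entry), b_3(latch)>"
   becomes stores of a_2 and b_3 into "phi_out_of_ssa[0]" (on the entry
   edge, respectively after the definition of b_3), followed by the
   load "res_1 = phi_out_of_ssa[0]" at the beginning of the block. */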
1442
1443 static void
1444 rewrite_phi_out_of_ssa (scop_p scop, gphi_iterator *psi)
1445 {
1446 gphi *phi = psi->phi ();
1447 basic_block bb = gimple_bb (phi);
1448 tree res = gimple_phi_result (phi);
1449 tree zero_dim_array = create_zero_dim_array (res, "phi_out_of_ssa");
1450
1451 for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
1452 {
1453 tree arg = gimple_phi_arg_def (phi, i);
1454 edge e = gimple_phi_arg_edge (phi, i);
1455
1456 /* Avoid the insertion of code in the loop latch to please the
1457 pattern matching of the vectorizer. */
1458 if (TREE_CODE (arg) == SSA_NAME
1459 && !SSA_NAME_IS_DEFAULT_DEF (arg)
1460 && e->src == bb->loop_father->latch)
1461 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
1462 SSA_NAME_DEF_STMT (arg));
1463 else
1464 insert_out_of_ssa_copy_on_edge (scop, e, zero_dim_array, arg);
1465 }
1466
1467 gimple *stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
1468 remove_phi_node (psi, false);
1469 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
1470 }
1471
1472 /* Rewrite the degenerate phi node at position PSI, of the form
1473 "x = phi (y, y, ..., y)", into the assignment "x = y". */
1474
1475 static void
1476 rewrite_degenerate_phi (gphi_iterator *psi)
1477 {
1478 gphi *phi = psi->phi ();
1479 tree res = gimple_phi_result (phi);
1480
1481 basic_block bb = gimple_bb (phi);
1482 tree rhs = degenerate_phi_result (phi);
1483 gcc_assert (rhs);
1484
1485 gimple *stmt = gimple_build_assign (res, rhs);
1486 remove_phi_node (psi, false);
1487
1488 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1489 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
1490 }
1491
1492 /* Rewrite out of SSA all the reduction phi nodes of SCOP. */
1493
1494 static void
1495 rewrite_reductions_out_of_ssa (scop_p scop)
1496 {
1497 int i;
1498 basic_block bb;
1499 FOR_EACH_VEC_ELT (scop->scop_info->bbs, i, bb)
1500 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);)
1501 {
1502 gphi *phi = psi.phi ();
1503
1504 if (virtual_operand_p (gimple_phi_result (phi)))
1505 {
1506 gsi_next (&psi);
1507 continue;
1508 }
1509
1510 if (gimple_phi_num_args (phi) > 1
1511 && degenerate_phi_result (phi))
1512 rewrite_degenerate_phi (&psi);
1513
1514 else if (scalar_close_phi_node_p (phi))
1515 rewrite_close_phi_out_of_ssa (scop, &psi);
1516
1517 else if (reduction_phi_p (scop->scop_info->region, &psi))
1518 rewrite_phi_out_of_ssa (scop, &psi);
1519 }
1520
1521 update_ssa (TODO_update_ssa);
1522 checking_verify_loop_closed_ssa (true);
1523 }
1524
1525 /* Rewrite the scalar dependence of DEF used in USE_STMT with a memory
1526 read from ZERO_DIM_ARRAY. */
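/* For instance (illustrative), a use of "x_1" in USE_STMT is replaced
   by a fresh name "x_2" defined by the load "x_2 = Array[0]" inserted
   just before USE_STMT, where "Array" stands for ZERO_DIM_ARRAY. */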
1527
1528 static void
1529 rewrite_cross_bb_scalar_dependence (scop_p scop, tree zero_dim_array,
1530 tree def, gimple *use_stmt)
1531 {
1532 gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
1533
1534 tree name = copy_ssa_name (def);
1535 gimple *name_stmt = gimple_build_assign (name, zero_dim_array);
1536
1537 gimple_assign_set_lhs (name_stmt, name);
1538 insert_stmts (scop, name_stmt, NULL, gsi_for_stmt (use_stmt));
1539
1540 ssa_op_iter iter;
1541 use_operand_p use_p;
1542 FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, iter, SSA_OP_ALL_USES)
1543 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0))
1544 replace_exp (use_p, name);
1545
1546 update_stmt (use_stmt);
1547 }
1548
1549 /* For every definition DEF in the SCOP that is used outside the scop,
1550 insert a closing-scop definition in the basic block just after this
1551 SCOP. */
1552
1553 static void
1554 handle_scalar_deps_crossing_scop_limits (scop_p scop, tree def, gimple *stmt)
1555 {
1556 tree var = create_tmp_reg (TREE_TYPE (def));
1557 tree new_name = make_ssa_name (var, stmt);
1558 bool needs_copy = false;
1559 sese_l region = scop->scop_info->region;
1560
1561 imm_use_iterator imm_iter;
1562 gimple *use_stmt;
1563 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
1564 {
1565 if (!bb_in_sese_p (gimple_bb (use_stmt), region))
1566 {
1567 use_operand_p use_p;
1568 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1569 {
1570 SET_USE (use_p, new_name);
1571 }
1572 update_stmt (use_stmt);
1573 needs_copy = true;
1574 }
1575 }
1576
1577 /* Insert in the empty BB just after the scop a use of DEF such
1578 that the rewrite of cross_bb_scalar_dependences won't insert
1579 arrays everywhere else. */
1580 if (needs_copy)
1581 {
1582 gimple *assign = gimple_build_assign (new_name, def);
1583 gimple_stmt_iterator psi = gsi_after_labels (region.exit->dest);
1584
1585 update_stmt (assign);
1586 gsi_insert_before (&psi, assign, GSI_SAME_STMT);
1587 }
1588 }
1589
1590 /* Rewrite the scalar dependences crossing the boundary of the BB
1591 containing STMT with an array. Return true when something has been
1592 changed. */
1593
1594 static bool
1595 rewrite_cross_bb_scalar_deps (scop_p scop, gimple_stmt_iterator *gsi)
1596 {
1597 sese_l region = scop->scop_info->region;
1598 gimple *stmt = gsi_stmt (*gsi);
1599 imm_use_iterator imm_iter;
1600 tree def;
1601 tree zero_dim_array = NULL_TREE;
1602 gimple *use_stmt;
1603 bool res = false;
1604
1605 switch (gimple_code (stmt))
1606 {
1607 case GIMPLE_ASSIGN:
1608 def = gimple_assign_lhs (stmt);
1609 break;
1610
1611 case GIMPLE_CALL:
1612 def = gimple_call_lhs (stmt);
1613 break;
1614
1615 default:
1616 return false;
1617 }
1618
1619 if (!def
1620 || !is_gimple_reg (def))
1621 return false;
1622
1623 if (scev_analyzable_p (def, region))
1624 {
1625 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (def));
1626 tree scev = scalar_evolution_in_region (region, loop, def);
1627
1628 if (tree_contains_chrecs (scev, NULL))
1629 return false;
1630
1631 propagate_expr_outside_region (def, scev, region);
1632 return true;
1633 }
1634
1635 basic_block def_bb = gimple_bb (stmt);
1636
1637 handle_scalar_deps_crossing_scop_limits (scop, def, stmt);
1638
1639 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
1640 if (gphi *phi = dyn_cast <gphi *> (use_stmt))
1641 {
1642 res = true;
1643 gphi_iterator psi = gsi_for_phi (phi);
1644
1645 if (scalar_close_phi_node_p (gsi_stmt (psi)))
1646 rewrite_close_phi_out_of_ssa (scop, &psi);
1647 else
1648 rewrite_phi_out_of_ssa (scop, &psi);
1649 }
1650
1651 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
1652 if (gimple_code (use_stmt) != GIMPLE_PHI
1653 && def_bb != gimple_bb (use_stmt)
1654 && !is_gimple_debug (use_stmt)
1655 && (res = true))
1656 {
1657 if (!zero_dim_array)
1658 {
1659 zero_dim_array = create_zero_dim_array
1660 (def, "Cross_BB_scalar_dependence");
1661 insert_out_of_ssa_copy (scop, zero_dim_array, def,
1662 SSA_NAME_DEF_STMT (def));
1663 gsi_next (gsi);
1664 }
1665
1666 rewrite_cross_bb_scalar_dependence (scop, unshare_expr (zero_dim_array),
1667 def, use_stmt);
1668 }
1669
1670 update_ssa (TODO_update_ssa);
1671
1672 return res;
1673 }
1674
1675 /* Rewrite out of SSA all the cross basic block scalar dependences of SCOP. */
1676
1677 static void
1678 rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
1679 {
1680 gimple_stmt_iterator psi;
1681 sese_l region = scop->scop_info->region;
1682 bool changed = false;
1683
1684 /* Create an extra empty BB after the scop. */
1685 split_edge (region.exit);
1686
1687 int i;
1688 basic_block bb;
1689 FOR_EACH_VEC_ELT (scop->scop_info->bbs, i, bb)
1690 for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
1691 changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
1692
1693 if (changed)
1694 {
1695 scev_reset_htab ();
1696 update_ssa (TODO_update_ssa);
1697 checking_verify_loop_closed_ssa (true);
1698 }
1699 }
1700
1701 /* Builds the polyhedral representation for a SESE region. */
1702
1703 void
1704 build_poly_scop (scop_p scop)
1705 {
1706 set_scop_parameter_dim (scop);
1707 build_scop_iteration_domain (scop);
1708 build_scop_context (scop);
1709 add_conditions_to_constraints (scop);
1710
1711 /* Rewrite out of SSA only after having translated the
1712 representation to the polyhedral representation to avoid scev
1713 analysis failures. That means that these functions will insert
1714 new data references that they create in the right place. */
1715 rewrite_reductions_out_of_ssa (scop);
1716 rewrite_cross_bb_scalar_deps_out_of_ssa (scop);
1717
1718 build_scop_drs (scop);
1719 build_scop_scattering (scop);
1720
1721 /* This SCoP has been translated to the polyhedral
1722 representation. */
1723 scop->poly_scop_p = true;
1724 }
1725 #endif /* HAVE_isl */