assert instead of silently failing code gen
[gcc.git] / gcc / graphite-isl-ast-to-gimple.c
1 /* Translation of isl AST to Gimple.
2 Copyright (C) 2014-2016 Free Software Foundation, Inc.
3 Contributed by Roman Gareev <gareevroman@gmail.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define USES_ISL
22
23 #include "config.h"
24
25 #ifdef HAVE_isl
26
27 #include "system.h"
28 #include "coretypes.h"
29 #include "backend.h"
30 #include "cfghooks.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "params.h"
34 #include "fold-const.h"
35 #include "gimple-fold.h"
36 #include "gimple-iterator.h"
37 #include "gimplify.h"
38 #include "gimplify-me.h"
39 #include "tree-eh.h"
40 #include "tree-ssa-loop.h"
41 #include "tree-ssa-operands.h"
42 #include "tree-ssa-propagate.h"
43 #include "tree-pass.h"
44 #include "cfgloop.h"
45 #include "tree-data-ref.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-scalar-evolution.h"
48 #include "gimple-ssa.h"
49 #include "tree-phinodes.h"
50 #include "tree-into-ssa.h"
51 #include "ssa-iterators.h"
52 #include "tree-cfg.h"
53 #include "gimple-pretty-print.h"
54 #include "cfganal.h"
55 #include "value-prof.h"
56 #include "graphite.h"
57 #include <map>
58
59 /* We always try to use signed 128 bit types, but fall back to smaller types
60 in case a platform does not provide types of these sizes. In the future we
61 should use isl to derive the optimal type for each subexpression. */
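/* For example, on a target whose widest fixed-size integer mode is only 64
   bits wide, the fallback below yields a precision of 64 instead of 128.  */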
62
63 static int max_mode_int_precision =
64 GET_MODE_PRECISION (mode_for_size (MAX_FIXED_MODE_SIZE, MODE_INT, 0));
65 static int graphite_expression_type_precision = 128 <= max_mode_int_precision ?
66 128 : max_mode_int_precision;
67
68 struct ast_build_info
69 {
70 ast_build_info()
71 : is_parallelizable(false)
72 { }
73 bool is_parallelizable;
74 };
75
76 /* Converts a GMP constant VAL to a tree and returns it. */
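/* For instance, calling this with TYPE == NULL_TREE and VAL == 5 returns the
   INTEGER_CST 5 of integer_type_node, since a NULL type defaults to
   integer_type_node below.  */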
77
78 static tree
79 gmp_cst_to_tree (tree type, mpz_t val)
80 {
81 tree t = type ? type : integer_type_node;
82 mpz_t tmp;
83
84 mpz_init (tmp);
85 mpz_set (tmp, val);
86 wide_int wi = wi::from_mpz (t, tmp, true);
87 mpz_clear (tmp);
88
89 return wide_int_to_tree (t, wi);
90 }
91
92 /* Verifies properties that GRAPHITE should maintain during translation. */
93
94 static inline void
95 graphite_verify (void)
96 {
97 checking_verify_loop_structure ();
98 checking_verify_loop_closed_ssa (true);
99 }
100
101 /* IVS_PARAMS maps isl's scattering and parameter identifiers
102 to corresponding trees. */
103
104 typedef std::map<isl_id *, tree> ivs_params;
105
106 /* Free all memory allocated for isl's identifiers. */
107
108 static void ivs_params_clear (ivs_params &ip)
109 {
110 std::map<isl_id *, tree>::iterator it;
111 for (it = ip.begin ();
112 it != ip.end (); it++)
113 {
114 isl_id_free (it->first);
115 }
116 }
117
118 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
119
120 /* Set the "separate" option for the schedule node. */
121
122 static isl_schedule_node *
123 set_separate_option (__isl_take isl_schedule_node *node, void *user)
124 {
125 if (user)
126 return node;
127
128 if (isl_schedule_node_get_type (node) != isl_schedule_node_band)
129 return node;
130
131 /* Set the "separate" option unless it is set earlier to another option. */
132 if (isl_schedule_node_band_member_get_ast_loop_type (node, 0)
133 == isl_ast_loop_default)
134 return isl_schedule_node_band_member_set_ast_loop_type
135 (node, 0, isl_ast_loop_separate);
136
137 return node;
138 }
139
140 /* Print SCHEDULE under an AST form on file F. */
141
142 void
143 print_schedule_ast (FILE *f, __isl_keep isl_schedule *schedule, scop_p scop)
144 {
145 isl_set *set = isl_set_params (isl_set_copy (scop->param_context));
146 isl_ast_build *context = isl_ast_build_from_context (set);
147 isl_ast_node *ast
148 = isl_ast_build_node_from_schedule (context, isl_schedule_copy (schedule));
149 isl_ast_build_free (context);
150 print_isl_ast (f, ast);
151 isl_ast_node_free (ast);
152 }
153
154 DEBUG_FUNCTION void
155 debug_schedule_ast (__isl_keep isl_schedule *s, scop_p scop)
156 {
157 print_schedule_ast (stderr, s, scop);
158 }
159
160 #endif
161
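/* Kinds of phi nodes distinguished during renaming: loop_phi for a phi in a
   loop header (one init-edge and one back-edge), close_phi for a
   single-argument phi at a loop exit, cond_phi for a phi at the merge point
   of a condition where neither incoming edge is a back-edge, and unknown_phi
   for uses that are not phi arguments.  */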
162 enum phi_node_kind
163 {
164 unknown_phi,
165 loop_phi,
166 close_phi,
167 cond_phi
168 };
169
170 class translate_isl_ast_to_gimple
171 {
172 public:
173 translate_isl_ast_to_gimple (sese_info_p r)
174 : region (r), codegen_error (false)
175 { }
176
177 /* Translates an isl AST node NODE to GCC representation in the
178 context of a SESE. */
179 edge translate_isl_ast (loop_p context_loop, __isl_keep isl_ast_node *node,
180 edge next_e, ivs_params &ip);
181
182 /* Translates an isl_ast_node_for to Gimple. */
183 edge translate_isl_ast_node_for (loop_p context_loop,
184 __isl_keep isl_ast_node *node,
185 edge next_e, ivs_params &ip);
186
 187 /* Create the loop for an isl_ast_node_for.
188
189 - NEXT_E is the edge where new generated code should be attached. */
190 edge translate_isl_ast_for_loop (loop_p context_loop,
191 __isl_keep isl_ast_node *node_for,
192 edge next_e,
193 tree type, tree lb, tree ub,
194 ivs_params &ip);
195
196 /* Translates an isl_ast_node_if to Gimple. */
197 edge translate_isl_ast_node_if (loop_p context_loop,
198 __isl_keep isl_ast_node *node,
199 edge next_e, ivs_params &ip);
200
201 /* Translates an isl_ast_node_user to Gimple.
202
203 FIXME: We should remove iv_map.create (loop->num + 1), if it is
204 possible. */
205 edge translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
206 edge next_e, ivs_params &ip);
207
208 /* Translates an isl_ast_node_block to Gimple. */
209 edge translate_isl_ast_node_block (loop_p context_loop,
210 __isl_keep isl_ast_node *node,
211 edge next_e, ivs_params &ip);
212
213 /* Converts a unary isl_ast_expr_op expression E to a GCC expression tree of
214 type TYPE. */
215 tree unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
216 ivs_params &ip);
217
218 /* Converts a binary isl_ast_expr_op expression E to a GCC expression tree of
219 type TYPE. */
220 tree binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
221 ivs_params &ip);
222
223 /* Converts a ternary isl_ast_expr_op expression E to a GCC expression tree of
224 type TYPE. */
225 tree ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
226 ivs_params &ip);
227
228 /* Converts an isl_ast_expr_op expression E with unknown number of arguments
229 to a GCC expression tree of type TYPE. */
230 tree nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
231 ivs_params &ip);
232
233 /* Converts an isl AST expression E back to a GCC expression tree of
234 type TYPE. */
235 tree gcc_expression_from_isl_expression (tree type,
236 __isl_take isl_ast_expr *,
237 ivs_params &ip);
238
239 /* Return the tree variable that corresponds to the given isl ast identifier
240 expression (an isl_ast_expr of type isl_ast_expr_id).
241
 242 FIXME: We should replace blind conversion of id's type with derivation
243 of the optimal type when we get the corresponding isl support. Blindly
244 converting type sizes may be problematic when we switch to smaller
245 types. */
246 tree gcc_expression_from_isl_ast_expr_id (tree type,
 247 __isl_take isl_ast_expr *expr_id,
248 ivs_params &ip);
249
250 /* Converts an isl_ast_expr_int expression E to a GCC expression tree of
251 type TYPE. */
252 tree gcc_expression_from_isl_expr_int (tree type,
253 __isl_take isl_ast_expr *expr);
254
255 /* Converts an isl_ast_expr_op expression E to a GCC expression tree of
256 type TYPE. */
257 tree gcc_expression_from_isl_expr_op (tree type,
258 __isl_take isl_ast_expr *expr,
259 ivs_params &ip);
260
261 /* Creates a new LOOP corresponding to isl_ast_node_for. Inserts an
262 induction variable for the new LOOP. New LOOP is attached to CFG
263 starting at ENTRY_EDGE. LOOP is inserted into the loop tree and
264 becomes the child loop of the OUTER_LOOP. NEWIVS_INDEX binds
265 isl's scattering name to the induction variable created for the
266 loop of STMT. The new induction variable is inserted in the NEWIVS
267 vector and is of type TYPE. */
268 struct loop *graphite_create_new_loop (edge entry_edge,
269 __isl_keep isl_ast_node *node_for,
270 loop_p outer, tree type,
271 tree lb, tree ub, ivs_params &ip);
272
273 /* All loops generated by create_empty_loop_on_edge have the form of
274 a post-test loop:
275
276 do
277
278 {
279 body of the loop;
280 } while (lower bound < upper bound);
281
 282 We create a new if region protecting the loop from being executed when
 283 the execution count is zero (lower bound > upper bound). */
284 edge graphite_create_new_loop_guard (edge entry_edge,
285 __isl_keep isl_ast_node *node_for,
286 tree *type,
287 tree *lb, tree *ub, ivs_params &ip);
288
289 /* Creates a new if region corresponding to isl's cond. */
290 edge graphite_create_new_guard (edge entry_edge,
291 __isl_take isl_ast_expr *if_cond,
292 ivs_params &ip);
293
294 /* Inserts in iv_map a tuple (OLD_LOOP->num, NEW_NAME) for the induction
295 variables of the loops around GBB in SESE.
296
297 FIXME: Instead of using a vec<tree> that maps each loop id to a possible
298 chrec, we could consider using a map<int, tree> that maps loop ids to the
299 corresponding tree expressions. */
300 void build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
301 __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
302 sese_l &region);
303
304 /* Patch the missing arguments of the phi nodes. */
305
306 void translate_pending_phi_nodes (void);
307
308 /* Add isl's parameter identifiers and corresponding trees to ivs_params. */
309
310 void add_parameters_to_ivs_params (scop_p scop, ivs_params &ip);
311
312 /* Generates a build, which specifies the constraints on the parameters. */
313
314 __isl_give isl_ast_build *generate_isl_context (scop_p scop);
315
316 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
317 /* Generate isl AST from schedule of SCOP. */
318 __isl_give isl_ast_node * scop_to_isl_ast (scop_p scop);
319 #else
320 /* Get the maximal number of schedule dimensions in the scop SCOP. */
321 int get_max_schedule_dimensions (scop_p scop);
322
323 /* Extend the schedule to NB_SCHEDULE_DIMS schedule dimensions.
324
 325 For schedules with different dimensionality, the isl AST generator cannot
326 define an order and will just randomly choose an order. The solution to
327 this problem is to extend all schedules to the maximal number of schedule
328 dimensions (using '0's for the remaining values). */
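/* For example, with NB_SCHEDULE_DIMS == 3, a two-dimensional schedule
   [S_1[i, j] -> [i, j]] would be extended to [S_1[i, j] -> [i, j, 0]].  */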
329 __isl_give isl_map *extend_schedule (__isl_take isl_map *schedule,
330 int nb_schedule_dims);
331
332 /* Generates a schedule, which specifies an order used to
333 visit elements in a domain. */
334 __isl_give isl_union_map *generate_isl_schedule (scop_p scop);
335
336 /* Set the separate option for all dimensions.
337 This helps to reduce control overhead. */
338 __isl_give isl_ast_build *set_options (__isl_take isl_ast_build *control,
339 __isl_keep isl_union_map *schedule);
340
341 /* Generate isl AST from schedule of SCOP. Also, collects IVS_PARAMS in
342 IP. */
343 __isl_give isl_ast_node *scop_to_isl_ast (scop_p scop, ivs_params &ip);
344
345 /* Prints NODE to FILE. */
346 void print_isl_ast_node (FILE *file, __isl_keep isl_ast_node *node,
347 __isl_keep isl_ctx *ctx) const
348 {
349 isl_printer *prn = isl_printer_to_file (ctx, file);
350 prn = isl_printer_set_output_format (prn, ISL_FORMAT_C);
351 prn = isl_printer_print_ast_node (prn, node);
352 prn = isl_printer_print_str (prn, "\n");
353 isl_printer_free (prn);
354 }
355 #endif
356
357 /* Return true if RENAME (defined in BB) is a valid use in NEW_BB. The
358 definition should flow into use, and the use should respect the loop-closed
359 SSA form. */
360
361 bool is_valid_rename (tree rename, basic_block def_bb, basic_block use_bb,
362 phi_node_kind, tree old_name, basic_block old_bb) const;
363
364 /* Returns the expression associated to OLD_NAME (which is used in OLD_BB), in
 365 NEW_BB from RENAME_MAP. PHI_KIND determines the kind of phi node in
 366 which OLD_NAME is used. */
367
368 tree get_rename (basic_block new_bb, tree old_name,
369 basic_block old_bb, phi_node_kind) const;
370
371 /* For ops which are scev_analyzeable, we can regenerate a new name from
372 its scalar evolution around LOOP. */
373
374 tree get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
375 basic_block new_bb, basic_block old_bb,
376 vec<tree> iv_map);
377
378 /* Returns a basic block that could correspond to where a constant was defined
 379 in the original code. In the original code OLD_BB had the definition; we
 380 need to find which basic block, out of the copies of OLD_BB in the new
 381 region, a definition should correspond to if it has to reach BB. */
382
383 basic_block get_def_bb_for_const (basic_block bb, basic_block old_bb) const;
384
 385 /* Get the new name of OP (from OLD_BB) to be used in NEW_BB. PHI_KIND
 386 determines the kind of phi node in which OP is used. */
387
388 tree get_new_name (basic_block new_bb, tree op,
389 basic_block old_bb, phi_node_kind) const;
390
391 /* Collect all the operands of NEW_EXPR by recursively visiting each
392 operand. */
393
394 void collect_all_ssa_names (tree new_expr, vec<tree> *vec_ssa);
395
396 /* Copy the PHI arguments from OLD_PHI to the NEW_PHI. The arguments to
397 NEW_PHI must be found unless they can be POSTPONEd for later. */
398
399 bool copy_loop_phi_args (gphi *old_phi, init_back_edge_pair_t &ibp_old_bb,
400 gphi *new_phi, init_back_edge_pair_t &ibp_new_bb,
401 bool postpone);
402
403 /* Copy loop phi nodes from BB to NEW_BB. */
404
405 bool copy_loop_phi_nodes (basic_block bb, basic_block new_bb);
406
407 /* Add phi nodes to all merge points of all the diamonds enclosing the loop of
408 the close phi node PHI. */
409
410 bool add_close_phis_to_merge_points (gphi *old_phi, gphi *new_phi,
411 tree default_value);
412
413 tree add_close_phis_to_outer_loops (tree last_merge_name, edge merge_e,
414 gimple *old_close_phi);
415
416 /* Copy all the loop-close phi args from BB to NEW_BB. */
417
418 bool copy_loop_close_phi_args (basic_block old_bb, basic_block new_bb,
419 bool postpone);
420
421 /* Copy loop close phi nodes from BB to NEW_BB. */
422
423 bool copy_loop_close_phi_nodes (basic_block old_bb, basic_block new_bb);
424
425 /* Copy the arguments of cond-phi node PHI, to NEW_PHI in the codegenerated
426 region. If postpone is true and it isn't possible to copy any arg of PHI,
427 the PHI is added to the REGION->INCOMPLETE_PHIS to be codegenerated later.
428 Returns false if the copying was unsuccessful. */
429
430 bool copy_cond_phi_args (gphi *phi, gphi *new_phi, vec<tree> iv_map,
431 bool postpone);
432
433 /* Copy cond phi nodes from BB to NEW_BB. A cond-phi node is a basic block
 434 containing phi nodes coming from two predecessors, neither of which is a
 435 back edge. */
436
437 bool copy_cond_phi_nodes (basic_block bb, basic_block new_bb,
438 vec<tree> iv_map);
439
440 /* Duplicates the statements of basic block BB into basic block NEW_BB
 441 and computes the new induction variables according to the IV_MAP.
442 CODEGEN_ERROR is set when the code generation cannot continue. */
443
444 bool graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
445 vec<tree> iv_map);
446
447 /* Copies BB and includes in the copied BB all the statements that can
448 be reached following the use-def chains from the memory accesses,
449 and returns the next edge following this new block. codegen_error is
450 set when the code generation cannot continue. */
451
452 edge copy_bb_and_scalar_dependences (basic_block bb, edge next_e,
453 vec<tree> iv_map);
454
 455 /* Given a basic block containing close-phi nodes, return the new basic block
 456 where to insert a copy of the close-phi nodes. All the uses in close phis
 457 should come from a single loop; otherwise NULL is returned. */
458 edge edge_for_new_close_phis (basic_block bb);
459
460 /* Add NEW_NAME as the ARGNUM-th arg of NEW_PHI which is in NEW_BB.
461 DOMINATING_PRED is the predecessor basic block of OLD_BB which dominates
462 the other pred of OLD_BB as well. If no such basic block exists then it is
463 NULL. NON_DOMINATING_PRED is a pred which does not dominate OLD_BB, it
464 cannot be NULL.
465
466 Case1: OLD_BB->preds {BB1, BB2} and BB1 does not dominate BB2 and vice
467 versa. In this case DOMINATING_PRED = NULL.
468
469 Case2: OLD_BB->preds {BB1, BB2} and BB1 dominates BB2.
470
471 Returns true on successful copy of the args, false otherwise. */
472
473 bool add_phi_arg_for_new_expr (tree old_phi_args[2], tree new_phi_args[2],
474 edge old_bb_dominating_edge,
475 edge old_bb_non_dominating_edge,
476 gphi *phi, gphi *new_phi,
477 basic_block new_bb);
478
479 /* Renames the scalar uses of the statement COPY, using the substitution map
480 RENAME_MAP, inserting the gimplification code at GSI_TGT, for the
481 translation REGION, with the original copied statement in LOOP, and using
482 the induction variable renaming map IV_MAP. Returns true when something
483 has been renamed. codegen_error is set when the code generation cannot
484 continue. */
485
486 bool rename_uses (gimple *copy, gimple_stmt_iterator *gsi_tgt,
487 basic_block old_bb, loop_p loop, vec<tree> iv_map);
488
489 /* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR).
 490 When OLD_NAME and EXPR are the same, no rename is registered. */
491
492 void set_rename (tree old_name, tree expr);
493
494 /* Create new names for all the definitions created by COPY and add
495 replacement mappings for each new name. */
496
497 void set_rename_for_each_def (gimple *stmt);
498
 499 /* Insert each statement from SEQ at its earliest insertion point. */
500
501 void gsi_insert_earliest (gimple_seq seq);
502
503 /* Rename all the operands of NEW_EXPR by recursively visiting each
504 operand. */
505
506 tree rename_all_uses (tree new_expr, basic_block new_bb, basic_block old_bb);
507
508 bool codegen_error_p () const
509 { return codegen_error; }
510
511 /* Return true when OP is a constant tree. */
512
513 bool is_constant (tree op) const
514 {
515 return TREE_CODE (op) == INTEGER_CST
516 || TREE_CODE (op) == REAL_CST
517 || TREE_CODE (op) == COMPLEX_CST
518 || TREE_CODE (op) == VECTOR_CST;
519 }
520
521 private:
522 /* The region to be translated. */
523 sese_info_p region;
524
525 /* This flag is set when an error occurred during the translation of isl AST
526 to Gimple. */
527 bool codegen_error;
528
529 /* A vector of all the edges at if_condition merge points. */
530 auto_vec<edge, 2> merge_points;
531 };
532
533 /* Return the tree variable that corresponds to the given isl ast identifier
534 expression (an isl_ast_expr of type isl_ast_expr_id).
535
536 FIXME: We should replace blind conversion of id's type with derivation
537 of the optimal type when we get the corresponding isl support. Blindly
538 converting type sizes may be problematic when we switch to smaller
539 types. */
540
541 tree
542 translate_isl_ast_to_gimple::
543 gcc_expression_from_isl_ast_expr_id (tree type,
544 __isl_take isl_ast_expr *expr_id,
545 ivs_params &ip)
546 {
547 gcc_assert (isl_ast_expr_get_type (expr_id) == isl_ast_expr_id);
548 isl_id *tmp_isl_id = isl_ast_expr_get_id (expr_id);
549 std::map<isl_id *, tree>::iterator res;
550 res = ip.find (tmp_isl_id);
551 isl_id_free (tmp_isl_id);
552 gcc_assert (res != ip.end () &&
553 "Could not map isl_id to tree expression");
554 isl_ast_expr_free (expr_id);
555 tree t = res->second;
556 tree *val = region->parameter_rename_map->get(t);
557
558 if (!val)
559 val = &t;
560 return fold_convert (type, *val);
561 }
562
563 /* Converts an isl_ast_expr_int expression E to a GCC expression tree of
564 type TYPE. */
565
566 tree
567 translate_isl_ast_to_gimple::
568 gcc_expression_from_isl_expr_int (tree type, __isl_take isl_ast_expr *expr)
569 {
570 gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_int);
571 isl_val *val = isl_ast_expr_get_val (expr);
572 mpz_t val_mpz_t;
573 mpz_init (val_mpz_t);
574 tree res;
575 if (isl_val_get_num_gmp (val, val_mpz_t) == -1)
576 res = NULL_TREE;
577 else
578 res = gmp_cst_to_tree (type, val_mpz_t);
579 isl_val_free (val);
580 isl_ast_expr_free (expr);
581 mpz_clear (val_mpz_t);
582 return res;
583 }
584
585 /* Converts a binary isl_ast_expr_op expression E to a GCC expression tree of
586 type TYPE. */
587
588 tree
589 translate_isl_ast_to_gimple::
590 binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
591 {
592 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
593 tree tree_lhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
594 arg_expr = isl_ast_expr_get_op_arg (expr, 1);
595 tree tree_rhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
596
597 enum isl_ast_op_type expr_type = isl_ast_expr_get_op_type (expr);
598 isl_ast_expr_free (expr);
599
600 if (codegen_error)
601 return NULL_TREE;
602
603 switch (expr_type)
604 {
605 case isl_ast_op_add:
606 return fold_build2 (PLUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);
607
608 case isl_ast_op_sub:
609 return fold_build2 (MINUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);
610
611 case isl_ast_op_mul:
612 return fold_build2 (MULT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
613
614 case isl_ast_op_div:
615 /* As isl operates on arbitrary precision numbers, we may end up with
616 division by 2^64 that is folded to 0. */
617 if (integer_zerop (tree_rhs_expr))
618 {
619 codegen_error = true;
620 return NULL_TREE;
621 }
622 return fold_build2 (EXACT_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
623
624 case isl_ast_op_pdiv_q:
625 /* As isl operates on arbitrary precision numbers, we may end up with
626 division by 2^64 that is folded to 0. */
627 if (integer_zerop (tree_rhs_expr))
628 {
629 codegen_error = true;
630 return NULL_TREE;
631 }
632 return fold_build2 (TRUNC_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
633
634 #if HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
635 /* isl 0.15 or later. */
636 case isl_ast_op_zdiv_r:
637 #endif
638 case isl_ast_op_pdiv_r:
639 /* As isl operates on arbitrary precision numbers, we may end up with
640 division by 2^64 that is folded to 0. */
641 if (integer_zerop (tree_rhs_expr))
642 {
643 codegen_error = true;
644 return NULL_TREE;
645 }
646 return fold_build2 (TRUNC_MOD_EXPR, type, tree_lhs_expr, tree_rhs_expr);
647
648 case isl_ast_op_fdiv_q:
649 /* As isl operates on arbitrary precision numbers, we may end up with
650 division by 2^64 that is folded to 0. */
651 if (integer_zerop (tree_rhs_expr))
652 {
653 codegen_error = true;
654 return NULL_TREE;
655 }
656 return fold_build2 (FLOOR_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
657
658 case isl_ast_op_and:
659 return fold_build2 (TRUTH_ANDIF_EXPR, type,
660 tree_lhs_expr, tree_rhs_expr);
661
662 case isl_ast_op_or:
663 return fold_build2 (TRUTH_ORIF_EXPR, type, tree_lhs_expr, tree_rhs_expr);
664
665 case isl_ast_op_eq:
666 return fold_build2 (EQ_EXPR, type, tree_lhs_expr, tree_rhs_expr);
667
668 case isl_ast_op_le:
669 return fold_build2 (LE_EXPR, type, tree_lhs_expr, tree_rhs_expr);
670
671 case isl_ast_op_lt:
672 return fold_build2 (LT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
673
674 case isl_ast_op_ge:
675 return fold_build2 (GE_EXPR, type, tree_lhs_expr, tree_rhs_expr);
676
677 case isl_ast_op_gt:
678 return fold_build2 (GT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
679
680 default:
681 gcc_unreachable ();
682 }
683 }
684
685 /* Converts a ternary isl_ast_expr_op expression E to a GCC expression tree of
686 type TYPE. */
687
688 tree
689 translate_isl_ast_to_gimple::
690 ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
691 {
692 enum isl_ast_op_type t = isl_ast_expr_get_op_type (expr);
693 gcc_assert (t == isl_ast_op_cond || t == isl_ast_op_select);
694 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
695 tree a = gcc_expression_from_isl_expression (type, arg_expr, ip);
696 arg_expr = isl_ast_expr_get_op_arg (expr, 1);
697 tree b = gcc_expression_from_isl_expression (type, arg_expr, ip);
698 arg_expr = isl_ast_expr_get_op_arg (expr, 2);
699 tree c = gcc_expression_from_isl_expression (type, arg_expr, ip);
700 isl_ast_expr_free (expr);
701
702 if (codegen_error)
703 return NULL_TREE;
704
705 return fold_build3 (COND_EXPR, type, a, b, c);
706 }
707
708 /* Converts a unary isl_ast_expr_op expression E to a GCC expression tree of
709 type TYPE. */
710
711 tree
712 translate_isl_ast_to_gimple::
713 unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
714 {
715 gcc_assert (isl_ast_expr_get_op_type (expr) == isl_ast_op_minus);
716 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
717 tree tree_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
718 isl_ast_expr_free (expr);
719 return codegen_error ? NULL_TREE : fold_build1 (NEGATE_EXPR, type, tree_expr);
720 }
721
722 /* Converts an isl_ast_expr_op expression E with unknown number of arguments
723 to a GCC expression tree of type TYPE. */
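/* For an n-ary min/max such as max(a, b, c), the result is built up as
   nested binary nodes: MAX_EXPR (MAX_EXPR (a, b), c).  */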
724
725 tree
726 translate_isl_ast_to_gimple::
727 nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
728 {
729 enum tree_code op_code;
730 switch (isl_ast_expr_get_op_type (expr))
731 {
732 case isl_ast_op_max:
733 op_code = MAX_EXPR;
734 break;
735
736 case isl_ast_op_min:
737 op_code = MIN_EXPR;
738 break;
739
740 default:
741 gcc_unreachable ();
742 }
743 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
744 tree res = gcc_expression_from_isl_expression (type, arg_expr, ip);
745
746 if (codegen_error)
747 {
748 isl_ast_expr_free (expr);
749 return NULL_TREE;
750 }
751
752 int i;
753 for (i = 1; i < isl_ast_expr_get_op_n_arg (expr); i++)
754 {
755 arg_expr = isl_ast_expr_get_op_arg (expr, i);
756 tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);
757
758 if (codegen_error)
759 {
760 isl_ast_expr_free (expr);
761 return NULL_TREE;
762 }
763
764 res = fold_build2 (op_code, type, res, t);
765 }
766 isl_ast_expr_free (expr);
767 return res;
768 }
769
770 /* Converts an isl_ast_expr_op expression E to a GCC expression tree of
771 type TYPE. */
772
773 tree
774 translate_isl_ast_to_gimple::
775 gcc_expression_from_isl_expr_op (tree type, __isl_take isl_ast_expr *expr,
776 ivs_params &ip)
777 {
778 if (codegen_error)
779 {
780 isl_ast_expr_free (expr);
781 return NULL_TREE;
782 }
783
784 gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_op);
785 switch (isl_ast_expr_get_op_type (expr))
786 {
787 /* These isl ast expressions are not supported yet. */
788 case isl_ast_op_error:
789 case isl_ast_op_call:
790 case isl_ast_op_and_then:
791 case isl_ast_op_or_else:
792 gcc_unreachable ();
793
794 case isl_ast_op_max:
795 case isl_ast_op_min:
796 return nary_op_to_tree (type, expr, ip);
797
798 case isl_ast_op_add:
799 case isl_ast_op_sub:
800 case isl_ast_op_mul:
801 case isl_ast_op_div:
802 case isl_ast_op_pdiv_q:
803 case isl_ast_op_pdiv_r:
804 case isl_ast_op_fdiv_q:
805 #if HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
806 /* isl 0.15 or later. */
807 case isl_ast_op_zdiv_r:
808 #endif
809 case isl_ast_op_and:
810 case isl_ast_op_or:
811 case isl_ast_op_eq:
812 case isl_ast_op_le:
813 case isl_ast_op_lt:
814 case isl_ast_op_ge:
815 case isl_ast_op_gt:
816 return binary_op_to_tree (type, expr, ip);
817
818 case isl_ast_op_minus:
819 return unary_op_to_tree (type, expr, ip);
820
821 case isl_ast_op_cond:
822 case isl_ast_op_select:
823 return ternary_op_to_tree (type, expr, ip);
824
825 default:
826 gcc_unreachable ();
827 }
828
829 return NULL_TREE;
830 }
831
832 /* Converts an isl AST expression E back to a GCC expression tree of
833 type TYPE. */
834
835 tree
836 translate_isl_ast_to_gimple::
837 gcc_expression_from_isl_expression (tree type, __isl_take isl_ast_expr *expr,
838 ivs_params &ip)
839 {
840 if (codegen_error)
841 {
842 isl_ast_expr_free (expr);
843 return NULL_TREE;
844 }
845
846 switch (isl_ast_expr_get_type (expr))
847 {
848 case isl_ast_expr_id:
849 return gcc_expression_from_isl_ast_expr_id (type, expr, ip);
850
851 case isl_ast_expr_int:
852 return gcc_expression_from_isl_expr_int (type, expr);
853
854 case isl_ast_expr_op:
855 return gcc_expression_from_isl_expr_op (type, expr, ip);
856
857 default:
858 gcc_unreachable ();
859 }
860
861 return NULL_TREE;
862 }
863
864 /* Creates a new LOOP corresponding to isl_ast_node_for. Inserts an
865 induction variable for the new LOOP. New LOOP is attached to CFG
866 starting at ENTRY_EDGE. LOOP is inserted into the loop tree and
867 becomes the child loop of the OUTER_LOOP. NEWIVS_INDEX binds
868 isl's scattering name to the induction variable created for the
869 loop of STMT. The new induction variable is inserted in the NEWIVS
870 vector and is of type TYPE. */
871
872 struct loop *
873 translate_isl_ast_to_gimple::
874 graphite_create_new_loop (edge entry_edge, __isl_keep isl_ast_node *node_for,
875 loop_p outer, tree type, tree lb, tree ub,
876 ivs_params &ip)
877 {
878 isl_ast_expr *for_inc = isl_ast_node_for_get_inc (node_for);
879 tree stride = gcc_expression_from_isl_expression (type, for_inc, ip);
880
881 /* To fail code generation, we generate wrong code until we discard it. */
882 if (codegen_error)
883 stride = integer_zero_node;
884
885 tree ivvar = create_tmp_var (type, "graphite_IV");
886 tree iv, iv_after_increment;
887 loop_p loop = create_empty_loop_on_edge
888 (entry_edge, lb, stride, ub, ivvar, &iv, &iv_after_increment,
889 outer ? outer : entry_edge->src->loop_father);
890
891 isl_ast_expr *for_iterator = isl_ast_node_for_get_iterator (node_for);
892 isl_id *id = isl_ast_expr_get_id (for_iterator);
893 std::map<isl_id *, tree>::iterator res;
894 res = ip.find (id);
895 if (ip.count (id))
896 isl_id_free (res->first);
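/* Bind the isl iterator identifier to the new induction variable so that
   later isl AST expressions referring to this iterator are translated into
   uses of IV.  */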
897 ip[id] = iv;
898 isl_ast_expr_free (for_iterator);
899 return loop;
900 }
901
 902 /* Create the loop for an isl_ast_node_for.
903
904 - NEXT_E is the edge where new generated code should be attached. */
905
906 edge
907 translate_isl_ast_to_gimple::
908 translate_isl_ast_for_loop (loop_p context_loop,
909 __isl_keep isl_ast_node *node_for, edge next_e,
910 tree type, tree lb, tree ub,
911 ivs_params &ip)
912 {
913 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
914 struct loop *loop = graphite_create_new_loop (next_e, node_for, context_loop,
915 type, lb, ub, ip);
916 edge last_e = single_exit (loop);
917 edge to_body = single_succ_edge (loop->header);
918 basic_block after = to_body->dest;
919
920 /* Translate the body of the loop. */
921 isl_ast_node *for_body = isl_ast_node_for_get_body (node_for);
922 next_e = translate_isl_ast (loop, for_body, to_body, ip);
923 isl_ast_node_free (for_body);
924
925 /* Early return if we failed to translate loop body. */
926 if (!next_e || codegen_error_p ())
927 return NULL;
928
929 if (next_e->dest != after)
930 redirect_edge_succ_nodup (next_e, after);
931 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
932
933 if (flag_loop_parallelize_all)
934 {
935 isl_id *id = isl_ast_node_get_annotation (node_for);
936 gcc_assert (id);
937 ast_build_info *for_info = (ast_build_info *) isl_id_get_user (id);
938 loop->can_be_parallel = for_info->is_parallelizable;
939 free (for_info);
940 isl_id_free (id);
941 }
942
943 return last_e;
944 }
945
 946 /* We use this function to get the upper bound because of the form
 947 used by isl to represent loops:
948
949 for (iterator = init; cond; iterator += inc)
950
951 {
952
953 ...
954
955 }
956
957 The loop condition is an arbitrary expression, which contains the
958 current loop iterator.
959
960 (e.g. iterator + 3 < B && C > iterator + A)
961
962 We have to know the upper bound of the iterator to generate a loop
963 in Gimple form. It can be obtained from the special representation
964 of the loop condition, which is generated by isl,
965 if the ast_build_atomic_upper_bound option is set. In this case,
966 isl generates a loop condition that consists of the current loop
 967 iterator, an operator (< or <=), and an expression not involving
968 the iterator, which is processed and returned by this function.
969
 970 (e.g. iterator <= upper-bound-expression-without-iterator) */
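/* For example, for a condition of the form "i < n + 5" this function returns
   the expression "n + 5 - 1", and for "i <= n" it returns "n" unchanged.  */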
971
972 static __isl_give isl_ast_expr *
973 get_upper_bound (__isl_keep isl_ast_node *node_for)
974 {
975 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
976 isl_ast_expr *for_cond = isl_ast_node_for_get_cond (node_for);
977 gcc_assert (isl_ast_expr_get_type (for_cond) == isl_ast_expr_op);
978 isl_ast_expr *res;
979 switch (isl_ast_expr_get_op_type (for_cond))
980 {
981 case isl_ast_op_le:
982 res = isl_ast_expr_get_op_arg (for_cond, 1);
983 break;
984
985 case isl_ast_op_lt:
986 {
987 /* (iterator < ub) => (iterator <= ub - 1). */
988 isl_val *one =
989 isl_val_int_from_si (isl_ast_expr_get_ctx (for_cond), 1);
990 isl_ast_expr *ub = isl_ast_expr_get_op_arg (for_cond, 1);
991 res = isl_ast_expr_sub (ub, isl_ast_expr_from_val (one));
992 break;
993 }
994
995 default:
996 gcc_unreachable ();
997 }
998 isl_ast_expr_free (for_cond);
999 return res;
1000 }
1001
1002 /* All loops generated by create_empty_loop_on_edge have the form of
1003 a post-test loop:
1004
1005 do
1006
1007 {
1008 body of the loop;
1009 } while (lower bound < upper bound);
1010
 1011 We create a new if region protecting the loop from being executed when
 1012 the execution count is zero (lower bound > upper bound). */
1013
1014 edge
1015 translate_isl_ast_to_gimple::
1016 graphite_create_new_loop_guard (edge entry_edge,
1017 __isl_keep isl_ast_node *node_for, tree *type,
1018 tree *lb, tree *ub, ivs_params &ip)
1019 {
1020 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
1021 tree cond_expr;
1022 edge exit_edge;
1023
1024 *type =
1025 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1026 isl_ast_expr *for_init = isl_ast_node_for_get_init (node_for);
1027 *lb = gcc_expression_from_isl_expression (*type, for_init, ip);
1028 /* To fail code generation, we generate wrong code until we discard it. */
1029 if (codegen_error)
1030 *lb = integer_zero_node;
1031 isl_ast_expr *upper_bound = get_upper_bound (node_for);
1032 *ub = gcc_expression_from_isl_expression (*type, upper_bound, ip);
1033 /* To fail code generation, we generate wrong code until we discard it. */
1034 if (codegen_error)
1035 *ub = integer_zero_node;
1036
1037 /* When ub is simply a constant or a parameter, use lb <= ub. */
1038 if (TREE_CODE (*ub) == INTEGER_CST || TREE_CODE (*ub) == SSA_NAME)
1039 cond_expr = fold_build2 (LE_EXPR, boolean_type_node, *lb, *ub);
1040 else
1041 {
1042 tree one = (POINTER_TYPE_P (*type)
1043 ? convert_to_ptrofftype (integer_one_node)
1044 : fold_convert (*type, integer_one_node));
1045 /* Adding +1 and using LT_EXPR helps with loop latches that have a
1046 loop iteration count of "PARAMETER - 1". For PARAMETER == 0 this
1047 becomes 2^k-1 due to integer overflow, and the condition lb <= ub
1048 is true, even if we do not want this. However lb < ub + 1 is false,
1049 as expected. */
1050 tree ub_one = fold_build2 (POINTER_TYPE_P (*type) ? POINTER_PLUS_EXPR
1051 : PLUS_EXPR, *type, *ub, one);
1052
1053 cond_expr = fold_build2 (LT_EXPR, boolean_type_node, *lb, ub_one);
1054 }
1055
1056 if (integer_onep (cond_expr))
1057 exit_edge = entry_edge;
1058 else
1059 exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
1060
1061 return exit_edge;
1062 }
1063
1064 /* Translates an isl_ast_node_for to Gimple. */
1065
1066 edge
1067 translate_isl_ast_to_gimple::
1068 translate_isl_ast_node_for (loop_p context_loop, __isl_keep isl_ast_node *node,
1069 edge next_e, ivs_params &ip)
1070 {
1071 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_for);
1072 tree type, lb, ub;
1073 edge last_e = graphite_create_new_loop_guard (next_e, node, &type,
1074 &lb, &ub, ip);
1075
1076 if (last_e == next_e)
1077 {
1078 /* There was no guard generated. */
1079 last_e = single_succ_edge (split_edge (last_e));
1080
1081 translate_isl_ast_for_loop (context_loop, node, next_e,
1082 type, lb, ub, ip);
1083 return last_e;
1084 }
1085
1086 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
1087 merge_points.safe_push (last_e);
1088
1089 last_e = single_succ_edge (split_edge (last_e));
1090 translate_isl_ast_for_loop (context_loop, node, true_e, type, lb, ub, ip);
1091
1092 return last_e;
1093 }
1094
1095 /* Inserts in iv_map a tuple (OLD_LOOP->num, NEW_NAME) for the induction
1096 variables of the loops around GBB in SESE.
1097
1098 FIXME: Instead of using a vec<tree> that maps each loop id to a possible
1099 chrec, we could consider using a map<int, tree> that maps loop ids to the
1100 corresponding tree expressions. */
1101
1102 void
1103 translate_isl_ast_to_gimple::
1104 build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
1105 __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
1106 sese_l &region)
1107 {
1108 gcc_assert (isl_ast_expr_get_type (user_expr) == isl_ast_expr_op &&
1109 isl_ast_expr_get_op_type (user_expr) == isl_ast_op_call);
1110 int i;
1111 isl_ast_expr *arg_expr;
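/* Operand 0 of the call expression is the statement name; the remaining
   operands give, for each loop around GBB, the expression of the new
   induction variables, hence the loop below starts at 1.  */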
1112 for (i = 1; i < isl_ast_expr_get_op_n_arg (user_expr); i++)
1113 {
1114 arg_expr = isl_ast_expr_get_op_arg (user_expr, i);
1115 tree type =
1116 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1117 tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);
1118 /* To fail code generation, we generate wrong code until we discard it. */
1119 if (codegen_error)
1120 t = integer_zero_node;
1121
1122 loop_p old_loop = gbb_loop_at_index (gbb, region, i - 1);
1123 iv_map[old_loop->num] = t;
1124 }
1125 }
1126
1127 /* Translates an isl_ast_node_user to Gimple.
1128
1129 FIXME: We should remove iv_map.create (loop->num + 1), if it is possible. */
1130
1131 edge
1132 translate_isl_ast_to_gimple::
1133 translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
1134 edge next_e, ivs_params &ip)
1135 {
1136 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_user);
1137
1138 isl_ast_expr *user_expr = isl_ast_node_user_get_expr (node);
1139 isl_ast_expr *name_expr = isl_ast_expr_get_op_arg (user_expr, 0);
1140 gcc_assert (isl_ast_expr_get_type (name_expr) == isl_ast_expr_id);
1141
1142 isl_id *name_id = isl_ast_expr_get_id (name_expr);
1143 poly_bb_p pbb = (poly_bb_p) isl_id_get_user (name_id);
1144 gcc_assert (pbb);
1145
1146 gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);
1147
1148 isl_ast_expr_free (name_expr);
1149 isl_id_free (name_id);
1150
1151 gcc_assert (GBB_BB (gbb) != ENTRY_BLOCK_PTR_FOR_FN (cfun) &&
1152 "The entry block should not even appear within a scop");
1153
1154 const int nb_loops = number_of_loops (cfun);
1155 vec<tree> iv_map;
1156 iv_map.create (nb_loops);
1157 iv_map.safe_grow_cleared (nb_loops);
1158
1159 build_iv_mapping (iv_map, gbb, user_expr, ip, pbb->scop->scop_info->region);
1160 isl_ast_expr_free (user_expr);
1161
1162 basic_block old_bb = GBB_BB (gbb);
1163 if (dump_file)
1164 {
1165 fprintf (dump_file,
1166 "[codegen] copying from bb_%d on edge (bb_%d, bb_%d)\n",
1167 old_bb->index, next_e->src->index, next_e->dest->index);
1168 print_loops_bb (dump_file, GBB_BB (gbb), 0, 3);
1169
1170 }
1171
1172 next_e = copy_bb_and_scalar_dependences (old_bb, next_e, iv_map);
1173
1174 iv_map.release ();
1175
1176 if (codegen_error_p ())
1177 return NULL;
1178
1179 if (dump_file)
1180 {
1181 fprintf (dump_file, "[codegen] (after copy) new basic block\n");
1182 print_loops_bb (dump_file, next_e->src, 0, 3);
1183 }
1184
1185 return next_e;
1186 }
1187
1188 /* Translates an isl_ast_node_block to Gimple. */
1189
1190 edge
1191 translate_isl_ast_to_gimple::
1192 translate_isl_ast_node_block (loop_p context_loop,
1193 __isl_keep isl_ast_node *node,
1194 edge next_e, ivs_params &ip)
1195 {
1196 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_block);
1197 isl_ast_node_list *node_list = isl_ast_node_block_get_children (node);
1198 int i;
1199 for (i = 0; i < isl_ast_node_list_n_ast_node (node_list); i++)
1200 {
1201 isl_ast_node *tmp_node = isl_ast_node_list_get_ast_node (node_list, i);
1202 next_e = translate_isl_ast (context_loop, tmp_node, next_e, ip);
1203 isl_ast_node_free (tmp_node);
1204 }
1205 isl_ast_node_list_free (node_list);
1206 return next_e;
1207 }
1208
1209 /* Creates a new if region corresponding to isl's cond. */
1210
1211 edge
1212 translate_isl_ast_to_gimple::
1213 graphite_create_new_guard (edge entry_edge, __isl_take isl_ast_expr *if_cond,
1214 ivs_params &ip)
1215 {
1216 tree type =
1217 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1218 tree cond_expr = gcc_expression_from_isl_expression (type, if_cond, ip);
1219 /* To fail code generation, we generate wrong code until we discard it. */
1220 if (codegen_error)
1221 cond_expr = integer_zero_node;
1222
1223 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
1224 return exit_edge;
1225 }
1226
1227 /* Translates an isl_ast_node_if to Gimple. */
1228
1229 edge
1230 translate_isl_ast_to_gimple::
1231 translate_isl_ast_node_if (loop_p context_loop,
1232 __isl_keep isl_ast_node *node,
1233 edge next_e, ivs_params &ip)
1234 {
1235 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_if);
1236 isl_ast_expr *if_cond = isl_ast_node_if_get_cond (node);
1237 edge last_e = graphite_create_new_guard (next_e, if_cond, ip);
1238 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
1239 merge_points.safe_push (last_e);
1240
1241 isl_ast_node *then_node = isl_ast_node_if_get_then (node);
1242 translate_isl_ast (context_loop, then_node, true_e, ip);
1243 isl_ast_node_free (then_node);
1244
1245 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
1246 isl_ast_node *else_node = isl_ast_node_if_get_else (node);
1247 if (isl_ast_node_get_type (else_node) != isl_ast_node_error)
1248 translate_isl_ast (context_loop, else_node, false_e, ip);
1249
1250 isl_ast_node_free (else_node);
1251 return last_e;
1252 }
1253
1254 /* Translates an isl AST node NODE to GCC representation in the
1255 context of a SESE. */
1256
1257 edge
1258 translate_isl_ast_to_gimple::translate_isl_ast (loop_p context_loop,
1259 __isl_keep isl_ast_node *node,
1260 edge next_e, ivs_params &ip)
1261 {
1262 if (codegen_error_p ())
1263 return NULL;
1264
1265 switch (isl_ast_node_get_type (node))
1266 {
1267 case isl_ast_node_error:
1268 gcc_unreachable ();
1269
1270 case isl_ast_node_for:
1271 return translate_isl_ast_node_for (context_loop, node,
1272 next_e, ip);
1273
1274 case isl_ast_node_if:
1275 return translate_isl_ast_node_if (context_loop, node,
1276 next_e, ip);
1277
1278 case isl_ast_node_user:
1279 return translate_isl_ast_node_user (node, next_e, ip);
1280
1281 case isl_ast_node_block:
1282 return translate_isl_ast_node_block (context_loop, node,
1283 next_e, ip);
1284
1285 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
1286 case isl_ast_node_mark:
1287 {
1288 isl_ast_node *n = isl_ast_node_mark_get_node (node);
1289 edge e = translate_isl_ast (context_loop, n, next_e, ip);
1290 isl_ast_node_free (n);
1291 return e;
1292 }
1293 #endif
1294
1295 default:
1296 gcc_unreachable ();
1297 }
1298 }
1299
1300 /* Return true when BB contains loop close phi nodes. A loop close phi node is
 1301 at the exit of a loop and takes one argument: the last value of the
 1302 variable being used out of the loop. */
1303
1304 static bool
1305 bb_contains_loop_close_phi_nodes (basic_block bb)
1306 {
1307 return single_pred_p (bb)
1308 && bb->loop_father != single_pred_edge (bb)->src->loop_father;
1309 }
1310
 1311 /* Return true when BB contains loop phi nodes. A loop phi node is a phi node
 1312 in a loop header, which has one init-edge and one back-edge. */
1313
1314 static bool
1315 bb_contains_loop_phi_nodes (basic_block bb)
1316 {
1317 gcc_assert (EDGE_COUNT (bb->preds) <= 2);
1318
1319 if (bb->preds->length () == 1)
1320 return false;
1321
1322 unsigned depth = loop_depth (bb->loop_father);
1323
1324 edge preds[2] = { (*bb->preds)[0], (*bb->preds)[1] };
1325
1326 if (depth > loop_depth (preds[0]->src->loop_father)
1327 || depth > loop_depth (preds[1]->src->loop_father))
1328 return true;
1329
 1330 /* When one of the edges corresponds to the same loop father and the other
1331 doesn't. */
1332 if (bb->loop_father != preds[0]->src->loop_father
1333 && bb->loop_father == preds[1]->src->loop_father)
1334 return true;
1335
1336 if (bb->loop_father != preds[1]->src->loop_father
1337 && bb->loop_father == preds[0]->src->loop_father)
1338 return true;
1339
1340 return false;
1341 }
1342
 1343 /* Check if USE is defined in a basic block from which the definition of USE
 1344 can propagate along all the paths. FIXME: Verify checks for virtual operands. */
1345
1346 static bool
1347 is_loop_closed_ssa_use (basic_block bb, tree use)
1348 {
1349 if (TREE_CODE (use) != SSA_NAME || virtual_operand_p (use))
1350 return true;
1351
1352 /* For close-phi nodes def always comes from a loop which has a back-edge. */
1353 if (bb_contains_loop_close_phi_nodes (bb))
1354 return true;
1355
1356 gimple *def = SSA_NAME_DEF_STMT (use);
1357 basic_block def_bb = gimple_bb (def);
1358 return (!def_bb
1359 || flow_bb_inside_loop_p (def_bb->loop_father, bb));
1360 }
1361
1362 /* Return the number of phi nodes in BB. */
1363
1364 static int
1365 number_of_phi_nodes (basic_block bb)
1366 {
1367 int num_phis = 0;
1368 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
1369 gsi_next (&psi))
1370 num_phis++;
1371 return num_phis;
1372 }
1373
1374 /* Returns true if BB uses name in one of its PHIs. */
1375
1376 static bool
1377 phi_uses_name (basic_block bb, tree name)
1378 {
1379 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
1380 gsi_next (&psi))
1381 {
1382 gphi *phi = psi.phi ();
1383 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1384 {
1385 tree use_arg = gimple_phi_arg_def (phi, i);
1386 if (use_arg == name)
1387 return true;
1388 }
1389 }
1390 return false;
1391 }
1392
1393 /* Return true if RENAME (defined in BB) is a valid use in NEW_BB. The
1394 definition should flow into use, and the use should respect the loop-closed
1395 SSA form. */
1396
1397 bool
1398 translate_isl_ast_to_gimple::
1399 is_valid_rename (tree rename, basic_block def_bb, basic_block use_bb,
1400 phi_node_kind phi_kind, tree old_name, basic_block old_bb) const
1401 {
1402 /* The def of the rename must either dominate the uses or come from a
1403 back-edge. Also the def must respect the loop closed ssa form. */
1404 if (!is_loop_closed_ssa_use (use_bb, rename))
1405 {
1406 if (dump_file)
1407 {
1408 fprintf (dump_file, "[codegen] rename not in loop closed ssa: ");
1409 print_generic_expr (dump_file, rename, 0);
1410 fprintf (dump_file, "\n");
1411 }
1412 return false;
1413 }
1414
1415 if (dominated_by_p (CDI_DOMINATORS, use_bb, def_bb))
1416 return true;
1417
1418 if (bb_contains_loop_phi_nodes (use_bb) && phi_kind == loop_phi)
1419 {
1420 /* The loop-header dominates the loop-body. */
1421 if (!dominated_by_p (CDI_DOMINATORS, def_bb, use_bb))
1422 return false;
1423
1424 /* RENAME would be used in loop-phi. */
1425 gcc_assert (number_of_phi_nodes (use_bb));
1426
1427 /* For definitions coming from back edges, we should check that
1428 old_name is used in a loop PHI node.
1429 FIXME: Verify if this is true. */
1430 if (phi_uses_name (old_bb, old_name))
1431 return true;
1432 }
1433 return false;
1434 }
1435
1436 /* Returns the expression associated to OLD_NAME (which is used in OLD_BB), in
1437 NEW_BB from RENAME_MAP. PHI_KIND determines the kind of phi node. */
1438
1439 tree
1440 translate_isl_ast_to_gimple::get_rename (basic_block new_bb,
1441 tree old_name,
1442 basic_block old_bb,
1443 phi_node_kind phi_kind) const
1444 {
1445 gcc_assert (TREE_CODE (old_name) == SSA_NAME);
1446 vec <tree> *renames = region->rename_map->get (old_name);
1447
1448 if (!renames || renames->is_empty ())
1449 return NULL_TREE;
1450
1451 if (1 == renames->length ())
1452 {
1453 tree rename = (*renames)[0];
1454 if (TREE_CODE (rename) == SSA_NAME)
1455 {
1456 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (rename));
1457 if (is_valid_rename (rename, bb, new_bb, phi_kind, old_name, old_bb)
1458 && (phi_kind == close_phi
1459 || flow_bb_inside_loop_p (bb->loop_father, new_bb)))
1460 return rename;
1461 return NULL_TREE;
1462 }
1463
1464 if (is_constant (rename))
1465 return rename;
1466
1467 return NULL_TREE;
1468 }
1469
 1470 /* More than one rename corresponds to the old_name. Find the rename for
1471 which the definition flows into usage at new_bb. */
1472 int i;
1473 tree t1 = NULL_TREE, t2;
1474 basic_block t1_bb = NULL;
1475 FOR_EACH_VEC_ELT (*renames, i, t2)
1476 {
1477 basic_block t2_bb = gimple_bb (SSA_NAME_DEF_STMT (t2));
1478
1479 /* Defined in the same basic block as used. */
1480 if (t2_bb == new_bb)
1481 return t2;
1482
1483 /* NEW_BB and T2_BB are in two unrelated if-clauses. */
1484 if (!dominated_by_p (CDI_DOMINATORS, new_bb, t2_bb))
1485 continue;
1486
1487 if (!flow_bb_inside_loop_p (t2_bb->loop_father, new_bb))
1488 continue;
1489
1490 /* Compute the nearest dominator. */
1491 if (!t1 || dominated_by_p (CDI_DOMINATORS, t2_bb, t1_bb))
1492 {
1493 t1_bb = t2_bb;
1494 t1 = t2;
1495 }
1496 }
1497
1498 return t1;
1499 }
1500
1501 /* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR).
 1502 When OLD_NAME and EXPR are the same, no rename is registered. */
1503
1504 void
1505 translate_isl_ast_to_gimple::set_rename (tree old_name, tree expr)
1506 {
1507 if (dump_file)
1508 {
1509 fprintf (dump_file, "[codegen] setting rename: old_name = ");
1510 print_generic_expr (dump_file, old_name, 0);
1511 fprintf (dump_file, ", new_name = ");
1512 print_generic_expr (dump_file, expr, 0);
1513 fprintf (dump_file, "\n");
1514 }
1515
1516 if (old_name == expr)
1517 return;
1518
1519 vec <tree> *renames = region->rename_map->get (old_name);
1520
1521 if (renames)
1522 renames->safe_push (expr);
1523 else
1524 {
1525 vec<tree> r;
1526 r.create (2);
1527 r.safe_push (expr);
1528 region->rename_map->put (old_name, r);
1529 }
1530
1531 tree t;
1532 int i;
1533 /* For a parameter of a scop we don't want to rename it. */
1534 FOR_EACH_VEC_ELT (region->params, i, t)
1535 if (old_name == t)
1536 region->parameter_rename_map->put(old_name, expr);
1537 }
1538
 1539 /* Return an iterator to the instruction that comes last in the execution order.
1540 Either GSI1 and GSI2 should belong to the same basic block or one of their
1541 respective basic blocks should dominate the other. */
1542
1543 gimple_stmt_iterator
1544 later_of_the_two (gimple_stmt_iterator gsi1, gimple_stmt_iterator gsi2)
1545 {
1546 basic_block bb1 = gsi_bb (gsi1);
1547 basic_block bb2 = gsi_bb (gsi2);
1548
1549 /* Find the iterator which is the latest. */
1550 if (bb1 == bb2)
1551 {
 1552 /* For empty basic blocks gsis point to the end of the sequence. There is
 1553 no operator== defined for gimple_stmt_iterator, and gsi_next would
 1554 assert for gsis not pointing to a valid statement. */
1555 gimple_stmt_iterator gsi = gsi1;
1556 do {
1557 if (gsi_stmt (gsi) == gsi_stmt (gsi2))
1558 return gsi2;
1559 gsi_next (&gsi);
1560 } while (!gsi_end_p (gsi));
1561
1562 return gsi1;
1563 }
1564
1565 /* Find the basic block closest to the basic block which defines stmt. */
1566 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
1567 return gsi1;
1568
1569 gcc_assert (dominated_by_p (CDI_DOMINATORS, bb2, bb1));
1570 return gsi2;
1571 }
1572
 1573 /* Insert each statement from SEQ at its earliest insertion point. */
1574
1575 void
1576 translate_isl_ast_to_gimple::gsi_insert_earliest (gimple_seq seq)
1577 {
1578 update_modified_stmts (seq);
1579 sese_l &codegen_region = region->if_region->true_region->region;
1580 basic_block begin_bb = get_entry_bb (codegen_region);
1581
 1582 /* Inserting the gimple statements in a vector because gimple_seq behaves
 1583 in strange ways when inserting the stmts from it into different basic
1584 blocks one at a time. */
1585 auto_vec<gimple *, 3> stmts;
1586 for (gimple_stmt_iterator gsi = gsi_start (seq); !gsi_end_p (gsi);
1587 gsi_next (&gsi))
1588 stmts.safe_push (gsi_stmt (gsi));
1589
1590 int i;
1591 gimple *use_stmt;
1592 FOR_EACH_VEC_ELT (stmts, i, use_stmt)
1593 {
1594 gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
1595 gimple_stmt_iterator gsi_def_stmt = gsi_start_bb_nondebug (begin_bb);
1596
1597 use_operand_p use_p;
1598 ssa_op_iter op_iter;
1599 FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, op_iter, SSA_OP_USE)
1600 {
1601 /* Iterator to the current def of use_p. For function parameters or
1602 anything where def is not found, insert at the beginning of the
1603 generated region. */
1604 gimple_stmt_iterator gsi_stmt = gsi_def_stmt;
1605
1606 tree op = USE_FROM_PTR (use_p);
1607 gimple *stmt = SSA_NAME_DEF_STMT (op);
1608 if (stmt && (gimple_code (stmt) != GIMPLE_NOP))
1609 gsi_stmt = gsi_for_stmt (stmt);
1610
1611 /* For region parameters, insert at the beginning of the generated
1612 region. */
1613 if (!bb_in_sese_p (gsi_bb (gsi_stmt), codegen_region))
1614 gsi_stmt = gsi_def_stmt;
1615
1616 gsi_def_stmt = later_of_the_two (gsi_stmt, gsi_def_stmt);
1617 }
1618
1619 if (!gsi_stmt (gsi_def_stmt))
1620 {
1621 gimple_stmt_iterator gsi = gsi_after_labels (gsi_bb (gsi_def_stmt));
1622 gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
1623 }
1624 else if (gimple_code (gsi_stmt (gsi_def_stmt)) == GIMPLE_PHI)
1625 {
1626 gimple_stmt_iterator bsi
1627 = gsi_start_bb_nondebug (gsi_bb (gsi_def_stmt));
1628 /* Insert right after the PHI statements. */
1629 gsi_insert_before (&bsi, use_stmt, GSI_NEW_STMT);
1630 }
1631 else
1632 gsi_insert_after (&gsi_def_stmt, use_stmt, GSI_NEW_STMT);
1633
1634 if (dump_file)
1635 {
1636 fprintf (dump_file, "[codegen] inserting statement: ");
1637 print_gimple_stmt (dump_file, use_stmt, 0, TDF_VOPS | TDF_MEMSYMS);
1638 print_loops_bb (dump_file, gimple_bb (use_stmt), 0, 3);
1639 }
1640 }
1641 }
1642
1643 /* Collect all the operands of NEW_EXPR by recursively visiting each
1644 operand. */
1645
1646 void
1647 translate_isl_ast_to_gimple::collect_all_ssa_names (tree new_expr,
1648 vec<tree> *vec_ssa)
1649 {
1650
 1651 /* Collect new_expr itself when it is an SSA_NAME. */
1652 if (TREE_CODE (new_expr) == SSA_NAME)
1653 {
1654 vec_ssa->safe_push (new_expr);
1655 return;
1656 }
1657
1658 /* Iterate over SSA_NAMES in NEW_EXPR. */
1659 for (int i = 0; i < (TREE_CODE_LENGTH (TREE_CODE (new_expr))); i++)
1660 {
1661 tree op = TREE_OPERAND (new_expr, i);
1662 collect_all_ssa_names (op, vec_ssa);
1663 }
1664 }
1665
 1666 /* This is an abridged version of the function copied from:
1667 tree.c:substitute_in_expr (tree exp, tree f, tree r). */
1668
1669 static tree
1670 substitute_ssa_name (tree exp, tree f, tree r)
1671 {
1672 enum tree_code code = TREE_CODE (exp);
1673 tree op0, op1, op2, op3;
1674 tree new_tree;
1675
1676 /* We handle TREE_LIST and COMPONENT_REF separately. */
1677 if (code == TREE_LIST)
1678 {
1679 op0 = substitute_ssa_name (TREE_CHAIN (exp), f, r);
1680 op1 = substitute_ssa_name (TREE_VALUE (exp), f, r);
1681 if (op0 == TREE_CHAIN (exp) && op1 == TREE_VALUE (exp))
1682 return exp;
1683
1684 return tree_cons (TREE_PURPOSE (exp), op1, op0);
1685 }
1686 else if (code == COMPONENT_REF)
1687 {
1688 tree inner;
1689
1690 /* If this expression is getting a value from a PLACEHOLDER_EXPR
1691 and it is the right field, replace it with R. */
1692 for (inner = TREE_OPERAND (exp, 0);
1693 REFERENCE_CLASS_P (inner);
1694 inner = TREE_OPERAND (inner, 0))
1695 ;
1696
1697 /* The field. */
1698 op1 = TREE_OPERAND (exp, 1);
1699
1700 if (TREE_CODE (inner) == PLACEHOLDER_EXPR && op1 == f)
1701 return r;
1702
 1703 /* If this expression hasn't been completed yet, leave it alone. */
1704 if (TREE_CODE (inner) == PLACEHOLDER_EXPR && !TREE_TYPE (inner))
1705 return exp;
1706
1707 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1708 if (op0 == TREE_OPERAND (exp, 0))
1709 return exp;
1710
1711 new_tree
1712 = fold_build3 (COMPONENT_REF, TREE_TYPE (exp), op0, op1, NULL_TREE);
1713 }
1714 else
1715 switch (TREE_CODE_CLASS (code))
1716 {
1717 case tcc_constant:
1718 return exp;
1719
1720 case tcc_declaration:
1721 if (exp == f)
1722 return r;
1723 else
1724 return exp;
1725
1726 case tcc_expression:
1727 if (exp == f)
1728 return r;
1729
1730 /* Fall through... */
1731
1732 case tcc_exceptional:
1733 case tcc_unary:
1734 case tcc_binary:
1735 case tcc_comparison:
1736 case tcc_reference:
1737 switch (TREE_CODE_LENGTH (code))
1738 {
1739 case 0:
1740 if (exp == f)
1741 return r;
1742 return exp;
1743
1744 case 1:
1745 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1746 if (op0 == TREE_OPERAND (exp, 0))
1747 return exp;
1748
1749 new_tree = fold_build1 (code, TREE_TYPE (exp), op0);
1750 break;
1751
1752 case 2:
1753 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1754 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1755
1756 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
1757 return exp;
1758
1759 new_tree = fold_build2 (code, TREE_TYPE (exp), op0, op1);
1760 break;
1761
1762 case 3:
1763 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1764 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1765 op2 = substitute_ssa_name (TREE_OPERAND (exp, 2), f, r);
1766
1767 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
1768 && op2 == TREE_OPERAND (exp, 2))
1769 return exp;
1770
1771 new_tree = fold_build3 (code, TREE_TYPE (exp), op0, op1, op2);
1772 break;
1773
1774 case 4:
1775 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1776 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1777 op2 = substitute_ssa_name (TREE_OPERAND (exp, 2), f, r);
1778 op3 = substitute_ssa_name (TREE_OPERAND (exp, 3), f, r);
1779
1780 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
1781 && op2 == TREE_OPERAND (exp, 2)
1782 && op3 == TREE_OPERAND (exp, 3))
1783 return exp;
1784
1785 new_tree
1786 = fold (build4 (code, TREE_TYPE (exp), op0, op1, op2, op3));
1787 break;
1788
1789 default:
1790 gcc_unreachable ();
1791 }
1792 break;
1793
1794 case tcc_vl_exp:
1795 default:
1796 gcc_unreachable ();
1797 }
1798
1799 TREE_READONLY (new_tree) |= TREE_READONLY (exp);
1800
1801 if (code == INDIRECT_REF || code == ARRAY_REF || code == ARRAY_RANGE_REF)
1802 TREE_THIS_NOTRAP (new_tree) |= TREE_THIS_NOTRAP (exp);
1803
1804 return new_tree;
1805 }
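
/* Illustrative example (hypothetical SSA names): substitute_ssa_name applied
   to the expression a_5 + b_7 * 4 with F = b_7 and R = c_9 rebuilds the
   expression as a_5 + c_9 * 4; subtrees that do not contain F are returned
   unchanged, and the fold_build* calls may further simplify the result.  */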
1806
1807 /* Rename all the operands of NEW_EXPR by recursively visiting each operand. */
1808
1809 tree
1810 translate_isl_ast_to_gimple::rename_all_uses (tree new_expr, basic_block new_bb,
1811 basic_block old_bb)
1812 {
1813 auto_vec<tree, 2> ssa_names;
1814 collect_all_ssa_names (new_expr, &ssa_names);
1815 tree t;
1816 int i;
1817 FOR_EACH_VEC_ELT (ssa_names, i, t)
1818 if (tree r = get_rename (new_bb, t, old_bb, unknown_phi))
1819 new_expr = substitute_ssa_name (new_expr, t, r);
1820
1821 return new_expr;
1822 }
1823
1824 /* For ops which are scev_analyzable, we can regenerate a new name from
1825 their scalar evolution around LOOP. */
1826
1827 tree
1828 translate_isl_ast_to_gimple::
1829 get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
1830 basic_block new_bb, basic_block old_bb,
1831 vec<tree> iv_map)
1832 {
1833 tree scev = scalar_evolution_in_region (region->region, loop, old_name);
1834
1835 /* At this point we should know the exact scev for each
1836 scalar SSA_NAME used in the scop. */
1837 gcc_assert (!chrec_contains_undetermined (scev));
1838
1839 tree new_expr = chrec_apply_map (scev, iv_map);
1840
1841 /* The apply should produce an expression tree containing
1842 the uses of the new induction variables. We should be
1843 able to use new_expr instead of the old_name in the newly
1844 generated loop nest. */
1845 gcc_assert (!chrec_contains_undetermined (new_expr)
1846 && !tree_contains_chrecs (new_expr, NULL));
1847
1848 if (TREE_CODE (new_expr) == SSA_NAME)
1849 {
1850 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (new_expr));
1851 if (bb && !dominated_by_p (CDI_DOMINATORS, new_bb, bb))
1852 {
1853 codegen_error = true;
1854 return build_zero_cst (TREE_TYPE (old_name));
1855 }
1856 }
1857
1858 new_expr = rename_all_uses (new_expr, new_bb, old_bb);
1859
1860 /* Check that the definitions of all the operands dominate the use of
1861 NEW_EXPR. */
1862 auto_vec <tree, 2> new_ssa_names;
1863 collect_all_ssa_names (new_expr, &new_ssa_names);
1864 int i;
1865 tree new_ssa_name;
1866 FOR_EACH_VEC_ELT (new_ssa_names, i, new_ssa_name)
1867 {
1868 if (TREE_CODE (new_ssa_name) == SSA_NAME)
1869 {
1870 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (new_ssa_name));
1871 if (bb && !dominated_by_p (CDI_DOMINATORS, new_bb, bb))
1872 {
1873 codegen_error = true;
1874 return build_zero_cst (TREE_TYPE (old_name));
1875 }
1876 }
1877 }
1878
1879 /* Replace the old_name with the new_expr. */
1880 return force_gimple_operand (unshare_expr (new_expr), stmts,
1881 true, NULL_TREE);
1882 }
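
/* Illustrative sketch of a simple case (hypothetical names): if OLD_NAME has
   the scalar evolution {0, +, 1}_1 in loop 1, and IV_MAP maps loop 1 to the
   induction variable of the newly generated loop, say niv_12, then
   chrec_apply_map yields niv_12, which is then renamed and gimplified in
   place of OLD_NAME.  */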
1883
1884 /* Rename the scalar uses of the statement COPY, inserting the
1885 gimplification code at GSI_TGT. OLD_BB is the basic block of the
1886 original copied statement, LOOP its containing loop, and IV_MAP the
1887 induction variable renaming map. Returns true when something has been
1888 renamed. */
1889
1890 bool
1891 translate_isl_ast_to_gimple::rename_uses (gimple *copy,
1892 gimple_stmt_iterator *gsi_tgt,
1893 basic_block old_bb,
1894 loop_p loop, vec<tree> iv_map)
1895 {
1896 bool changed = false;
1897
1898 if (is_gimple_debug (copy))
1899 {
1900 if (gimple_debug_bind_p (copy))
1901 gimple_debug_bind_reset_value (copy);
1902 else if (gimple_debug_source_bind_p (copy))
1903 return false;
1904 else
1905 gcc_unreachable ();
1906
1907 return false;
1908 }
1909
1910 if (dump_file)
1911 {
1912 fprintf (dump_file, "[codegen] renaming uses of stmt: ");
1913 print_gimple_stmt (dump_file, copy, 0, 0);
1914 }
1915
1916 use_operand_p use_p;
1917 ssa_op_iter op_iter;
1918 FOR_EACH_SSA_USE_OPERAND (use_p, copy, op_iter, SSA_OP_USE)
1919 {
1920 tree old_name = USE_FROM_PTR (use_p);
1921
1922 if (dump_file)
1923 {
1924 fprintf (dump_file, "[codegen] renaming old_name = ");
1925 print_generic_expr (dump_file, old_name, 0);
1926 fprintf (dump_file, "\n");
1927 }
1928
1929 if (TREE_CODE (old_name) != SSA_NAME
1930 || SSA_NAME_IS_DEFAULT_DEF (old_name))
1931 continue;
1932
1933 changed = true;
1934 tree new_expr = get_rename (gsi_tgt->bb, old_name,
1935 old_bb, unknown_phi);
1936
1937 if (new_expr)
1938 {
1939 tree type_old_name = TREE_TYPE (old_name);
1940 tree type_new_expr = TREE_TYPE (new_expr);
1941
1942 if (dump_file)
1943 {
1944 fprintf (dump_file, "[codegen] from rename_map: new_name = ");
1945 print_generic_expr (dump_file, new_expr, 0);
1946 fprintf (dump_file, "\n");
1947 }
1948
1949 if (type_old_name != type_new_expr
1950 || TREE_CODE (new_expr) != SSA_NAME)
1951 {
1952 tree var = create_tmp_var (type_old_name, "var");
1953
1954 if (!useless_type_conversion_p (type_old_name, type_new_expr))
1955 new_expr = fold_convert (type_old_name, new_expr);
1956
1957 gimple_seq stmts;
1958 new_expr = force_gimple_operand (new_expr, &stmts, true, var);
1959 gsi_insert_earliest (stmts);
1960 }
1961
1962 replace_exp (use_p, new_expr);
1963 continue;
1964 }
1965
1966 gimple_seq stmts;
1967 new_expr = get_rename_from_scev (old_name, &stmts, loop, gimple_bb (copy),
1968 old_bb, iv_map);
1969 if (!new_expr || codegen_error_p ())
1970 return false;
1971
1972 if (dump_file)
1973 {
1974 fprintf (dump_file, "[codegen] not in rename map, scev: ");
1975 print_generic_expr (dump_file, new_expr, 0);
1976 fprintf (dump_file, "\n");
1977 }
1978
1979 gsi_insert_earliest (stmts);
1980 replace_exp (use_p, new_expr);
1981
1982 if (TREE_CODE (new_expr) == INTEGER_CST
1983 && is_gimple_assign (copy))
1984 {
1985 tree rhs = gimple_assign_rhs1 (copy);
1986
1987 if (TREE_CODE (rhs) == ADDR_EXPR)
1988 recompute_tree_invariant_for_addr_expr (rhs);
1989 }
1990
1991 set_rename (old_name, new_expr);
1992 }
1993
1994 return changed;
1995 }
1996
1997 /* Returns a basic block that could correspond to where a constant was defined
1998 in the original code. In the original code OLD_BB had the definition, we
1999 need to find which basic block out of the copies of old_bb, in the new
2000 region, should a definition correspond to if it has to reach BB. */
2001
2002 basic_block
2003 translate_isl_ast_to_gimple::get_def_bb_for_const (basic_block bb,
2004 basic_block old_bb) const
2005 {
2006 vec <basic_block> *bbs = region->copied_bb_map->get (old_bb);
2007
2008 if (!bbs || bbs->is_empty ())
2009 return NULL;
2010
2011 if (1 == bbs->length ())
2012 return (*bbs)[0];
2013
2014 int i;
2015 basic_block b1 = NULL, b2;
2016 FOR_EACH_VEC_ELT (*bbs, i, b2)
2017 {
2018 if (b2 == bb)
2019 return bb;
2020
2021 /* BB and B2 are in two unrelated if-clauses. */
2022 if (!dominated_by_p (CDI_DOMINATORS, bb, b2))
2023 continue;
2024
2025 /* Compute the nearest dominator. */
2026 if (!b1 || dominated_by_p (CDI_DOMINATORS, b2, b1))
2027 b1 = b2;
2028 }
2029
2030 gcc_assert (b1);
2031 return b1;
2032 }
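
/* Illustrative example (hypothetical blocks): if OLD_BB was copied into, say,
   bb_41 and bb_57, and BB is dominated by bb_57 but not by bb_41, then bb_57
   is returned: the copy nearest to BB in the dominator tree.  */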
2033
2034 /* Get the new name of OP (from OLD_BB) to be used in NEW_BB. PHI_KIND
2035 determines the kind of phi node. */
2036
2037 tree
2038 translate_isl_ast_to_gimple::
2039 get_new_name (basic_block new_bb, tree op,
2040 basic_block old_bb, phi_node_kind phi_kind) const
2041 {
2042 /* For constants the names are the same. */
2043 if (is_constant (op))
2044 return op;
2045
2046 return get_rename (new_bb, op, old_bb, phi_kind);
2047 }
2048
2049 /* Return a debug location for OP. */
2050
2051 static location_t
2052 get_loc (tree op)
2053 {
2054 location_t loc = UNKNOWN_LOCATION;
2055
2056 if (TREE_CODE (op) == SSA_NAME)
2057 loc = gimple_location (SSA_NAME_DEF_STMT (op));
2058 return loc;
2059 }
2060
2061 /* Return the incoming edges of basic_block BB as a pair. The first edge is
2062 the init edge (coming from outside the loop) and the second one is the
2063 back edge of the loop. */
2064
2065 std::pair<edge, edge>
2066 get_edges (basic_block bb)
2067 {
2068 std::pair<edge, edge> edges;
2069 edge e;
2070 edge_iterator ei;
2071 FOR_EACH_EDGE (e, ei, bb->preds)
2072 if (bb->loop_father != e->src->loop_father)
2073 edges.first = e;
2074 else
2075 edges.second = e;
2076 return edges;
2077 }
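
/* Illustrative example (hypothetical blocks): for a loop header with a
   preheader bb_10 and a latch bb_14, edges.first is the edge bb_10 -> header
   (the init edge, coming from outside the loop) and edges.second is the edge
   bb_14 -> header (the back edge).  */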
2078
2079 /* Copy the PHI arguments from OLD_PHI to the NEW_PHI. The arguments to NEW_PHI
2080 must be found unless they can be POSTPONEd for later. */
2081
2082 bool
2083 translate_isl_ast_to_gimple::
2084 copy_loop_phi_args (gphi *old_phi, init_back_edge_pair_t &ibp_old_bb,
2085 gphi *new_phi, init_back_edge_pair_t &ibp_new_bb,
2086 bool postpone)
2087 {
2088 gcc_assert (gimple_phi_num_args (old_phi) == gimple_phi_num_args (new_phi));
2089
2090 basic_block new_bb = gimple_bb (new_phi);
2091 for (unsigned i = 0; i < gimple_phi_num_args (old_phi); i++)
2092 {
2093 edge e;
2094 if (gimple_phi_arg_edge (old_phi, i) == ibp_old_bb.first)
2095 e = ibp_new_bb.first;
2096 else
2097 e = ibp_new_bb.second;
2098
2099 tree old_name = gimple_phi_arg_def (old_phi, i);
2100 tree new_name = get_new_name (new_bb, old_name,
2101 gimple_bb (old_phi), loop_phi);
2102 if (new_name)
2103 {
2104 add_phi_arg (new_phi, new_name, e, get_loc (old_name));
2105 continue;
2106 }
2107
2108 gimple *old_def_stmt = SSA_NAME_DEF_STMT (old_name);
2109 if (!old_def_stmt || gimple_code (old_def_stmt) == GIMPLE_NOP)
2110 /* If the phi arg was a function arg, or wasn't defined, just use the
2111 old name. */
2112 add_phi_arg (new_phi, old_name, e, get_loc (old_name));
2113 else if (postpone)
2114 {
2115 /* Postpone code gen for later for those back-edges we don't have the
2116 names yet. */
2117 region->incomplete_phis.safe_push (std::make_pair (old_phi, new_phi));
2118 if (dump_file)
2119 fprintf (dump_file, "[codegen] postpone loop phi nodes.\n");
2120 }
2121 else
2122 /* Either we should add the arg to the phi, or we should postpone. */
2123 return false;
2124 }
2125 return true;
2126 }
2127
2128 /* Copy loop phi nodes from BB to NEW_BB. */
2129
2130 bool
2131 translate_isl_ast_to_gimple::copy_loop_phi_nodes (basic_block bb,
2132 basic_block new_bb)
2133 {
2134 if (dump_file)
2135 fprintf (dump_file, "[codegen] copying loop phi nodes in bb_%d.\n",
2136 new_bb->index);
2137
2138 /* Loop phi nodes should have only two arguments. */
2139 gcc_assert (2 == EDGE_COUNT (bb->preds));
2140
2141 /* First edge is the init edge and second is the back edge. */
2142 init_back_edge_pair_t ibp_old_bb = get_edges (bb);
2143
2144 /* First edge is the init edge and second is the back edge. */
2145 init_back_edge_pair_t ibp_new_bb = get_edges (new_bb);
2146
2147 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2148 gsi_next (&psi))
2149 {
2150 gphi *phi = psi.phi ();
2151 tree res = gimple_phi_result (phi);
2152 if (virtual_operand_p (res))
2153 continue;
2154 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2155 continue;
2156
2157 gphi *new_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2158 tree new_res = create_new_def_for (res, new_phi,
2159 gimple_phi_result_ptr (new_phi));
2160 set_rename (res, new_res);
2161 codegen_error = !copy_loop_phi_args (phi, ibp_old_bb, new_phi,
2162 ibp_new_bb, true);
2163 update_stmt (new_phi);
2164
2165 if (dump_file)
2166 {
2167 fprintf (dump_file, "[codegen] creating loop-phi node: ");
2168 print_gimple_stmt (dump_file, new_phi, 0, 0);
2169 }
2170 }
2171
2172 return true;
2173 }
2174
2175 /* Return the init value of PHI, the value coming from outside the loop. */
2176
2177 static tree
2178 get_loop_init_value (gphi *phi)
2179 {
2180
2181 loop_p loop = gimple_bb (phi)->loop_father;
2182
2183 edge e;
2184 edge_iterator ei;
2185 FOR_EACH_EDGE (e, ei, gimple_bb (phi)->preds)
2186 if (e->src->loop_father != loop)
2187 return gimple_phi_arg_def (phi, e->dest_idx);
2188
2189 return NULL_TREE;
2190 }
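
/* Illustrative example (hypothetical names): for a loop phi such as
   x_1 = PHI <init_4(preheader), next_7(latch)>, the returned value is
   init_4, the argument on the edge coming from outside the loop.  */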
2191
2192 /* Find the init value (the value which comes from outside the loop), of one of
2193 the operands of DEF which is defined by a loop phi. */
2194
2195 static tree
2196 find_init_value (gimple *def)
2197 {
2198 if (gimple_code (def) == GIMPLE_PHI)
2199 return get_loop_init_value (as_a <gphi*> (def));
2200
2201 if (gimple_vuse (def))
2202 return NULL_TREE;
2203
2204 ssa_op_iter iter;
2205 use_operand_p use_p;
2206 FOR_EACH_SSA_USE_OPERAND (use_p, def, iter, SSA_OP_USE)
2207 {
2208 tree use = USE_FROM_PTR (use_p);
2209 if (TREE_CODE (use) == SSA_NAME)
2210 {
2211 if (tree res = find_init_value (SSA_NAME_DEF_STMT (use)))
2212 return res;
2213 }
2214 }
2215
2216 return NULL_TREE;
2217 }
2218
2219 /* Return the init value, the value coming from outside the loop. */
2220
2221 static tree
2222 find_init_value_close_phi (gphi *phi)
2223 {
2224 gcc_assert (gimple_phi_num_args (phi) == 1);
2225 tree use_arg = gimple_phi_arg_def (phi, 0);
2226 gimple *def = SSA_NAME_DEF_STMT (use_arg);
2227 return find_init_value (def);
2228 }
2229
2230
2231 tree translate_isl_ast_to_gimple::
2232 add_close_phis_to_outer_loops (tree last_merge_name, edge last_e,
2233 gimple *old_close_phi)
2234 {
2235 sese_l &codegen_region = region->if_region->true_region->region;
2236 gimple *stmt = SSA_NAME_DEF_STMT (last_merge_name);
2237 basic_block bb = gimple_bb (stmt);
2238 if (!bb_in_sese_p (bb, codegen_region))
2239 return last_merge_name;
2240
2241 loop_p loop = bb->loop_father;
2242 if (!loop_in_sese_p (loop, codegen_region))
2243 return last_merge_name;
2244
2245 edge e = single_exit (loop);
2246
2247 if (dominated_by_p (CDI_DOMINATORS, e->dest, last_e->src))
2248 return last_merge_name;
2249
2250 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2251 tree old_close_phi_name = gimple_phi_result (old_close_phi);
2252
2253 bb = e->dest;
2254 if (!bb_contains_loop_close_phi_nodes (bb) || !single_succ_p (bb))
2255 bb = split_edge (e);
2256
2257 gphi *close_phi = create_phi_node (SSA_NAME_VAR (last_merge_name), bb);
2258 tree res = create_new_def_for (last_merge_name, close_phi,
2259 gimple_phi_result_ptr (close_phi));
2260 set_rename (old_close_phi_name, res);
2261 add_phi_arg (close_phi, last_merge_name, e, get_loc (old_name));
2262 last_merge_name = res;
2263
2264 return add_close_phis_to_outer_loops (last_merge_name, last_e, old_close_phi);
2265 }
2266
2267 /* Add phi nodes to all merge points of all the diamonds enclosing the loop
2268 of the close phi node OLD_CLOSE_PHI, using DEFAULT_VALUE on the non-loop path. */
2269
2270 bool translate_isl_ast_to_gimple::
2271 add_close_phis_to_merge_points (gphi *old_close_phi, gphi *new_close_phi,
2272 tree default_value)
2273 {
2274 sese_l &codegen_region = region->if_region->true_region->region;
2275 basic_block default_value_bb = get_entry_bb (codegen_region);
2276 if (SSA_NAME == TREE_CODE (default_value))
2277 {
2278 gimple *stmt = SSA_NAME_DEF_STMT (default_value);
2279 if (!stmt || gimple_code (stmt) == GIMPLE_NOP)
2280 return false;
2281 default_value_bb = gimple_bb (stmt);
2282 }
2283
2284 basic_block new_close_phi_bb = gimple_bb (new_close_phi);
2285
2286 tree old_close_phi_name = gimple_phi_result (old_close_phi);
2287 tree new_close_phi_name = gimple_phi_result (new_close_phi);
2288 tree last_merge_name = new_close_phi_name;
2289 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2290
2291 int i;
2292 edge merge_e;
2293 FOR_EACH_VEC_ELT_REVERSE (merge_points, i, merge_e)
2294 {
2295 basic_block new_merge_bb = merge_e->src;
2296 if (!dominated_by_p (CDI_DOMINATORS, new_merge_bb, default_value_bb))
2297 continue;
2298
2299 last_merge_name = add_close_phis_to_outer_loops (last_merge_name, merge_e,
2300 old_close_phi);
2301
2302 gphi *merge_phi = create_phi_node (SSA_NAME_VAR (old_close_phi_name), new_merge_bb);
2303 tree merge_res = create_new_def_for (old_close_phi_name, merge_phi,
2304 gimple_phi_result_ptr (merge_phi));
2305 set_rename (old_close_phi_name, merge_res);
2306
2307 edge from_loop = NULL, from_default_value = NULL;
2308 edge e;
2309 edge_iterator ei;
2310 FOR_EACH_EDGE (e, ei, new_merge_bb->preds)
2311 if (dominated_by_p (CDI_DOMINATORS, e->src, new_close_phi_bb))
2312 from_loop = e;
2313 else
2314 from_default_value = e;
2315
2316 /* Because CDI_POST_DOMINATORS are not updated, we only rely on
2317 CDI_DOMINATORS, which may not handle all cases where new_close_phi_bb
2318 is contained in another condition. */
2319 if (!from_default_value || !from_loop)
2320 return false;
2321
2322 add_phi_arg (merge_phi, last_merge_name, from_loop, get_loc (old_name));
2323 add_phi_arg (merge_phi, default_value, from_default_value, get_loc (old_name));
2324
2325 if (dump_file)
2326 {
2327 fprintf (dump_file, "[codegen] Adding guard-phi: ");
2328 print_gimple_stmt (dump_file, merge_phi, 0, 0);
2329 }
2330
2331 update_stmt (merge_phi);
2332 last_merge_name = merge_res;
2333 }
2334
2335 return true;
2336 }
2337
2338 /* Copy all the loop-close phi args from BB to NEW_BB. */
2339
2340 bool
2341 translate_isl_ast_to_gimple::copy_loop_close_phi_args (basic_block old_bb,
2342 basic_block new_bb,
2343 bool postpone)
2344 {
2345 for (gphi_iterator psi = gsi_start_phis (old_bb); !gsi_end_p (psi);
2346 gsi_next (&psi))
2347 {
2348 gphi *old_close_phi = psi.phi ();
2349 tree res = gimple_phi_result (old_close_phi);
2350 if (virtual_operand_p (res))
2351 continue;
2352
2353 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2354 /* Loop close phi nodes should not be scev_analyzable_p. */
2355 gcc_unreachable ();
2356
2357 gphi *new_close_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2358 tree new_res = create_new_def_for (res, new_close_phi,
2359 gimple_phi_result_ptr (new_close_phi));
2360 set_rename (res, new_res);
2361
2362 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2363 tree new_name = get_new_name (new_bb, old_name, old_bb, close_phi);
2364
2365 /* Predecessor basic blocks of a loop close phi should have been code
2366 generated before. FIXME: This is fixable by merging PHIs from inner
2367 loops as well. See: gfortran.dg/graphite/interchange-3.f90. */
2368 if (!new_name)
2369 return false;
2370
2371 add_phi_arg (new_close_phi, new_name, single_pred_edge (new_bb),
2372 get_loc (old_name));
2373 if (dump_file)
2374 {
2375 fprintf (dump_file, "[codegen] Adding loop close phi: ");
2376 print_gimple_stmt (dump_file, new_close_phi, 0, 0);
2377 }
2378
2379 update_stmt (new_close_phi);
2380
2381 /* When there is no loop guard around this codegenerated loop, there is no
2382 need to collect the close-phi arg. */
2383 if (merge_points.is_empty ())
2384 continue;
2385
2386 /* Add a PHI in the succ_new_bb for each close phi of the loop. */
2387 tree default_value = find_init_value_close_phi (new_close_phi);
2388
2389 /* A close phi must come from a loop-phi having a default value. */
2390 if (!default_value)
2391 {
2392 if (!postpone)
2393 return false;
2394
2395 region->incomplete_phis.safe_push (std::make_pair (old_close_phi,
2396 new_close_phi));
2397 if (dump_file)
2398 {
2399 fprintf (dump_file, "[codegen] postpone close phi nodes: ");
2400 print_gimple_stmt (dump_file, new_close_phi, 0, 0);
2401 }
2402 continue;
2403 }
2404
2405 if (!add_close_phis_to_merge_points (old_close_phi, new_close_phi,
2406 default_value))
2407 return false;
2408 }
2409
2410 return true;
2411 }
2412
2413 /* Copy loop close phi nodes from BB to NEW_BB. */
2414
2415 bool
2416 translate_isl_ast_to_gimple::copy_loop_close_phi_nodes (basic_block old_bb,
2417 basic_block new_bb)
2418 {
2419 if (dump_file)
2420 fprintf (dump_file, "[codegen] copying loop close phi nodes in bb_%d.\n",
2421 new_bb->index);
2422 /* Loop close phi nodes should have only one argument. */
2423 gcc_assert (1 == EDGE_COUNT (old_bb->preds));
2424
2425 return copy_loop_close_phi_args (old_bb, new_bb, true);
2426 }
2427
2428
2429 /* Add the arguments NEW_PHI_ARGS to NEW_PHI, which is in NEW_BB, matching
2430 them with the right incoming edges. OLD_BB_DOMINATING_EDGE comes from the
2431 predecessor of OLD_BB which also dominates the other pred of OLD_BB; if no
2432 such basic block exists, it is NULL. OLD_BB_NON_DOMINATING_EDGE comes from
2433 a pred which does not dominate OLD_BB; it cannot be NULL.
2434
2435 Case 1: OLD_BB->preds {BB1, BB2} and BB1 does not dominate BB2 and vice versa.
2436 In this case OLD_BB_DOMINATING_EDGE is NULL.
2437
2438 Case 2: OLD_BB->preds {BB1, BB2} and BB1 dominates BB2.
2439
2440 Returns true on successful copy of the args, false otherwise. */
2441
2442 bool
2443 translate_isl_ast_to_gimple::
2444 add_phi_arg_for_new_expr (tree old_phi_args[2], tree new_phi_args[2],
2445 edge old_bb_dominating_edge,
2446 edge old_bb_non_dominating_edge,
2447 gphi *phi, gphi *new_phi,
2448 basic_block new_bb)
2449 {
2450 basic_block def_pred[2] = { NULL, NULL };
2451 int not_found_bb_index = -1;
2452 for (int i = 0; i < 2; i++)
2453 {
2454 /* If the corresponding def_bb could not be found the entry will be
2455 NULL. */
2456 if (TREE_CODE (old_phi_args[i]) == INTEGER_CST)
2457 def_pred[i] = get_def_bb_for_const (new_bb,
2458 gimple_phi_arg_edge (phi, i)->src);
2459 else if (new_phi_args[i] && (TREE_CODE (new_phi_args[i]) == SSA_NAME))
2460 def_pred[i] = gimple_bb (SSA_NAME_DEF_STMT (new_phi_args[i]));
2461
2462 if (!def_pred[i])
2463 {
2464 /* When neither is available, bail out. */
2465 if (not_found_bb_index != -1)
2466 return false;
2467 not_found_bb_index = i;
2468 }
2469 }
2470
2471 /* Here we are pattern matching on the structure of CFG w.r.t. old one. */
2472 if (old_bb_dominating_edge)
2473 {
2474 if (not_found_bb_index != -1)
2475 return false;
2476
2477 basic_block new_pred1 = (*new_bb->preds)[0]->src;
2478 basic_block new_pred2 = (*new_bb->preds)[1]->src;
2479 vec <basic_block> *bbs
2480 = region->copied_bb_map->get (old_bb_non_dominating_edge->src);
2481
2482 /* Could not find a mapping. */
2483 if (!bbs)
2484 return false;
2485
2486 basic_block new_pred = NULL;
2487 basic_block b;
2488 int i;
2489 FOR_EACH_VEC_ELT (*bbs, i, b)
2490 {
2491 if (dominated_by_p (CDI_DOMINATORS, new_pred1, b))
2492 {
2493 /* FIXME: If we have already found new_pred then we have to
2494 disambiguate, bail out for now. */
2495 if (new_pred)
2496 return false;
2497 new_pred = new_pred1;
2498 }
2499 if (dominated_by_p (CDI_DOMINATORS, new_pred2, b))
2500 {
2501 /* FIXME: If we have already found new_pred then either it dominates
2502 both or we have to disambiguate; bail out for now. */
2503 if (new_pred)
2504 return false;
2505 new_pred = new_pred2;
2506 }
2507 }
2508
2509 if (!new_pred)
2510 return false;
2511
2512 edge new_non_dominating_edge = find_edge (new_pred, new_bb);
2513 gcc_assert (new_non_dominating_edge);
2514 /* FIXME: Validate each arg just like in loop-phis. */
2515 /* By the process of elimination we first insert the phi arg for the
2516 non-dominating pred which is computed above and then we insert the
2517 remaining one. */
2518 int inserted_edge = 0;
2519 for (; inserted_edge < 2; inserted_edge++)
2520 {
2521 edge new_bb_pred_edge = gimple_phi_arg_edge (new_phi, inserted_edge);
2522 if (new_non_dominating_edge == new_bb_pred_edge)
2523 {
2524 add_phi_arg (new_phi, new_phi_args[inserted_edge],
2525 new_non_dominating_edge,
2526 get_loc (old_phi_args[inserted_edge]));
2527 break;
2528 }
2529 }
2530 if (inserted_edge == 2)
2531 return false;
2532
2533 int edge_dominating = inserted_edge == 0 ? 1 : 0;
2534
2535 edge new_dominating_edge = NULL;
2536 for (inserted_edge = 0; inserted_edge < 2; inserted_edge++)
2537 {
2538 edge e = gimple_phi_arg_edge (new_phi, inserted_edge);
2539 if (e != new_non_dominating_edge)
2540 {
2541 new_dominating_edge = e;
2542 add_phi_arg (new_phi, new_phi_args[edge_dominating],
2543 new_dominating_edge,
2544 get_loc (old_phi_args[inserted_edge]));
2545 break;
2546 }
2547 }
2548 gcc_assert (new_dominating_edge);
2549 }
2550 else
2551 {
2552 /* Classic diamond structure: both edges are non-dominating. We need to
2553 find one edge uniquely; the other can then be found by elimination. If
2554 any definition (def_pred) dominates both the preds of new_bb then we
2555 bail out. Entries of def_pred may be NULL; in that case we must
2556 uniquely find the pred with the help of only one entry.
2557 edge new_e[2] = { NULL, NULL };
2558 for (int i = 0; i < 2; i++)
2559 {
2560 edge e;
2561 edge_iterator ei;
2562 FOR_EACH_EDGE (e, ei, new_bb->preds)
2563 if (def_pred[i]
2564 && dominated_by_p (CDI_DOMINATORS, e->src, def_pred[i]))
2565 {
2566 if (new_e[i])
2567 /* We do not know how to handle the case when def_pred
2568 dominates more than a predecessor. */
2569 return false;
2570 new_e[i] = e;
2571 }
2572 }
2573
2574 gcc_assert (new_e[0] || new_e[1]);
2575
2576 /* Find the other edge by process of elimination. */
2577 if (not_found_bb_index != -1)
2578 {
2579 gcc_assert (!new_e[not_found_bb_index]);
2580 int found_bb_index = not_found_bb_index == 1 ? 0 : 1;
2581 edge e;
2582 edge_iterator ei;
2583 FOR_EACH_EDGE (e, ei, new_bb->preds)
2584 {
2585 if (new_e[found_bb_index] == e)
2586 continue;
2587 new_e[not_found_bb_index] = e;
2588 }
2589 }
2590
2591 /* Add edges to phi args. */
2592 for (int i = 0; i < 2; i++)
2593 add_phi_arg (new_phi, new_phi_args[i], new_e[i],
2594 get_loc (old_phi_args[i]));
2595 }
2596
2597 return true;
2598 }
2599
2600 /* Copy the arguments of cond-phi node PHI, to NEW_PHI in the codegenerated
2601 region. If postpone is true and it isn't possible to copy any arg of PHI,
2602 the PHI is added to the REGION->INCOMPLETE_PHIS to be codegenerated later.
2603 Returns false if the copying was unsuccessful. */
2604
2605 bool
2606 translate_isl_ast_to_gimple::copy_cond_phi_args (gphi *phi, gphi *new_phi,
2607 vec<tree> iv_map,
2608 bool postpone)
2609 {
2610 if (dump_file)
2611 fprintf (dump_file, "[codegen] copying cond phi args.\n");
2612 gcc_assert (2 == gimple_phi_num_args (phi));
2613
2614 basic_block new_bb = gimple_bb (new_phi);
2615 loop_p loop = gimple_bb (phi)->loop_father;
2616
2617 basic_block old_bb = gimple_bb (phi);
2618 edge old_bb_non_dominating_edge = NULL, old_bb_dominating_edge = NULL;
2619
2620 edge e;
2621 edge_iterator ei;
2622 FOR_EACH_EDGE (e, ei, old_bb->preds)
2623 if (!dominated_by_p (CDI_DOMINATORS, old_bb, e->src))
2624 old_bb_non_dominating_edge = e;
2625 else
2626 old_bb_dominating_edge = e;
2627
2628 gcc_assert (!dominated_by_p (CDI_DOMINATORS, old_bb,
2629 old_bb_non_dominating_edge->src));
2630
2631 tree new_phi_args[2];
2632 tree old_phi_args[2];
2633
2634 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
2635 {
2636 tree old_name = gimple_phi_arg_def (phi, i);
2637 tree new_name = get_new_name (new_bb, old_name, old_bb, cond_phi);
2638 old_phi_args[i] = old_name;
2639 if (new_name)
2640 {
2641 new_phi_args [i] = new_name;
2642 continue;
2643 }
2644
2645 /* If the phi-arg was a parameter. */
2646 if (vec_find (region->params, old_name) != -1)
2647 {
2648 new_phi_args [i] = old_name;
2649 if (dump_file)
2650 {
2651 fprintf (dump_file,
2652 "[codegen] parameter argument to phi, new_expr: ");
2653 print_generic_expr (dump_file, new_phi_args[i], 0);
2654 fprintf (dump_file, "\n");
2655 }
2656 continue;
2657 }
2658
2659 gimple *old_def_stmt = SSA_NAME_DEF_STMT (old_name);
2660 if (!old_def_stmt || gimple_code (old_def_stmt) == GIMPLE_NOP)
2661 /* FIXME: If the phi arg was a function arg, or wasn't defined, just use
2662 the old name. */
2663 return false;
2664
2665 if (postpone)
2666 {
2667 /* If the phi-arg is scev-analyzeable but only in the first stage. */
2668 if (is_gimple_reg (old_name)
2669 && scev_analyzable_p (old_name, region->region))
2670 {
2671 gimple_seq stmts;
2672 tree new_expr = get_rename_from_scev (old_name, &stmts, loop,
2673 new_bb, old_bb, iv_map);
2674 if (codegen_error_p ())
2675 return false;
2676
2677 gcc_assert (new_expr);
2678 if (dump_file)
2679 {
2680 fprintf (dump_file,
2681 "[codegen] scev analyzeable, new_expr: ");
2682 print_generic_expr (dump_file, new_expr, 0);
2683 fprintf (dump_file, "\n");
2684 }
2685 gsi_insert_earliest (stmts);
2686 new_phi_args [i] = new_name;
2687 continue;
2688 }
2689
2690 /* Postpone code gen for later for back-edges. */
2691 region->incomplete_phis.safe_push (std::make_pair (phi, new_phi));
2692
2693 if (dump_file)
2694 {
2695 fprintf (dump_file, "[codegen] postpone cond phi nodes: ");
2696 print_gimple_stmt (dump_file, new_phi, 0, 0);
2697 }
2698
2699 new_phi_args [i] = NULL_TREE;
2700 continue;
2701 }
2702 else
2703 /* Either we should add the arg to the phi, or we should postpone. */
2704 return false;
2705 }
2706
2707 /* If none of the args have been determined in the first stage then wait until
2708 later. */
2709 if (postpone && !new_phi_args[0] && !new_phi_args[1])
2710 return true;
2711
2712 return add_phi_arg_for_new_expr (old_phi_args, new_phi_args,
2713 old_bb_dominating_edge,
2714 old_bb_non_dominating_edge,
2715 phi, new_phi, new_bb);
2716 }
2717
2718 /* Copy cond phi nodes from BB to NEW_BB. A cond-phi node is a basic block
2719 containing phi nodes coming from two predecessors, and none of them are back
2720 edges. */
2721
2722 bool
2723 translate_isl_ast_to_gimple::copy_cond_phi_nodes (basic_block bb,
2724 basic_block new_bb,
2725 vec<tree> iv_map)
2726 {
2727
2728 gcc_assert (!bb_contains_loop_close_phi_nodes (bb));
2729
2730 if (dump_file)
2731 fprintf (dump_file, "[codegen] copying cond phi nodes in bb_%d.\n",
2732 new_bb->index);
2733
2734 /* Cond phi nodes should have exactly two arguments. */
2735 gcc_assert (2 == EDGE_COUNT (bb->preds));
2736
2737 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2738 gsi_next (&psi))
2739 {
2740 gphi *phi = psi.phi ();
2741 tree res = gimple_phi_result (phi);
2742 if (virtual_operand_p (res))
2743 continue;
2744 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2745 /* Cond phi nodes should not be scev_analyzable_p. */
2746 gcc_unreachable ();
2747
2748 gphi *new_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2749 tree new_res = create_new_def_for (res, new_phi,
2750 gimple_phi_result_ptr (new_phi));
2751 set_rename (res, new_res);
2752
2753 if (!copy_cond_phi_args (phi, new_phi, iv_map, true))
2754 return false;
2755
2756 update_stmt (new_phi);
2757 }
2758
2759 return true;
2760 }
2761
2762 /* Return true if STMT should be copied from region to the new code-generated
2763 region. LABELs, CONDITIONS, induction-variables and region parameters need
2764 not be copied. */
2765
2766 static bool
2767 should_copy_to_new_region (gimple *stmt, sese_info_p region)
2768 {
2769 /* Do not copy labels or conditions. */
2770 if (gimple_code (stmt) == GIMPLE_LABEL
2771 || gimple_code (stmt) == GIMPLE_COND)
2772 return false;
2773
2774 tree lhs;
2775 /* Do not copy induction variables. */
2776 if (is_gimple_assign (stmt)
2777 && (lhs = gimple_assign_lhs (stmt))
2778 && TREE_CODE (lhs) == SSA_NAME
2779 && is_gimple_reg (lhs)
2780 && scev_analyzable_p (lhs, region->region))
2781 return false;
2782
2783 /* Do not copy parameters that have been generated in the header of the
2784 scop. */
2785 if (is_gimple_assign (stmt)
2786 && (lhs = gimple_assign_lhs (stmt))
2787 && TREE_CODE (lhs) == SSA_NAME
2788 && region->parameter_rename_map->get(lhs))
2789 return false;
2790
2791 return true;
2792 }
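
/* Illustrative examples (hypothetical names): an assignment i_15 = i_14 + 1
   whose LHS is scev_analyzable_p in the region is not copied, because the
   induction variable is regenerated from the isl AST; an assignment whose
   LHS has an entry in parameter_rename_map is skipped as well, since that
   parameter has already been materialized in the header of the scop.  */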
2793
2794 /* Create new names for all the definitions created by COPY and add replacement
2795 mappings for each new name. */
2796
2797 void
2798 translate_isl_ast_to_gimple::set_rename_for_each_def (gimple *stmt)
2799 {
2800 def_operand_p def_p;
2801 ssa_op_iter op_iter;
2802 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_ALL_DEFS)
2803 {
2804 tree old_name = DEF_FROM_PTR (def_p);
2805 tree new_name = create_new_def_for (old_name, stmt, def_p);
2806 set_rename (old_name, new_name);
2807 }
2808 }
2809
2810 /* Duplicates the statements of basic block BB into basic block NEW_BB
2811 and computes the new induction variables according to IV_MAP.
2812 CODEGEN_ERROR is set when the code generation cannot continue. */
2813
2814 bool
2815 translate_isl_ast_to_gimple::graphite_copy_stmts_from_block (basic_block bb,
2816 basic_block new_bb,
2817 vec<tree> iv_map)
2818 {
2819 /* Iterator pointing to the place where the new statement(s) will be inserted. */
2820 gimple_stmt_iterator gsi_tgt = gsi_last_bb (new_bb);
2821
2822 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2823 gsi_next (&gsi))
2824 {
2825 gimple *stmt = gsi_stmt (gsi);
2826 if (!should_copy_to_new_region (stmt, region))
2827 continue;
2828
2829 /* Create a new copy of STMT and duplicate STMT's virtual
2830 operands. */
2831 gimple *copy = gimple_copy (stmt);
2832 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
2833
2834 if (dump_file)
2835 {
2836 fprintf (dump_file, "[codegen] inserting statement: ");
2837 print_gimple_stmt (dump_file, copy, 0, 0);
2838 }
2839
2840 maybe_duplicate_eh_stmt (copy, stmt);
2841 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
2842
2843 /* Create new names for each def in the copied stmt. */
2844 set_rename_for_each_def (copy);
2845
2846 loop_p loop = bb->loop_father;
2847 if (rename_uses (copy, &gsi_tgt, bb, loop, iv_map))
2848 {
2849 fold_stmt_inplace (&gsi_tgt);
2850 gcc_assert (gsi_stmt (gsi_tgt) == copy);
2851 }
2852
2853 if (codegen_error_p ())
2854 return false;
2855
2856 /* For each SSA_NAME in the parameter_rename_map rename their usage. */
2857 ssa_op_iter iter;
2858 use_operand_p use_p;
2859 if (!is_gimple_debug (copy))
2860 FOR_EACH_SSA_USE_OPERAND (use_p, copy, iter, SSA_OP_USE)
2861 {
2862 tree old_name = USE_FROM_PTR (use_p);
2863
2864 if (TREE_CODE (old_name) != SSA_NAME
2865 || SSA_NAME_IS_DEFAULT_DEF (old_name))
2866 continue;
2867
2868 tree *new_expr = region->parameter_rename_map->get (old_name);
2869 if (!new_expr)
2870 continue;
2871
2872 replace_exp (use_p, *new_expr);
2873 }
2874
2875 update_stmt (copy);
2876 }
2877
2878 return true;
2879 }
2880
2881
2882 /* Given a basic block BB containing close-phi nodes, return the edge on
2883 which copies of the close-phi nodes should be inserted. All the uses in
2884 the close phis should come from a single loop, otherwise return NULL. */
2885
2886 edge
2887 translate_isl_ast_to_gimple::edge_for_new_close_phis (basic_block bb)
2888 {
2889 /* Make sure that NEW_BB is the new_loop->exit->dest. We find the definition
2890 of close phi in the original code and then find the mapping of basic block
2891 defining that variable. If there are multiple close-phis and they are
2892 defined in different loops (in the original or in the new code) because of
2893 loop splitting, then we bail out. */
2894 loop_p new_loop = NULL;
2895 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2896 gsi_next (&psi))
2897 {
2898 gphi *phi = psi.phi ();
2899 tree name = gimple_phi_arg_def (phi, 0);
2900 basic_block old_loop_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
2901
2902 vec <basic_block> *bbs = region->copied_bb_map->get (old_loop_bb);
2903 if (!bbs || bbs->length () != 1)
2904 /* This is one of the places which shows that preserving the original
2905 structure is not always possible, as we may need to insert a close PHI
2906 for a loop where the latch does not have any mapping, or the mapping
2907 is ambiguous. */
2908 return NULL;
2909
2910 if (!new_loop)
2911 new_loop = (*bbs)[0]->loop_father;
2912 else if (new_loop != (*bbs)[0]->loop_father)
2913 return NULL;
2914 }
2915
2916 if (!new_loop)
2917 return NULL;
2918
2919 return single_exit (new_loop);
2920 }
2921
2922 /* Copies BB and includes in the copied BB all the statements that can
2923 be reached following the use-def chains from the memory accesses,
2924 and returns the next edge following this new block. codegen_error is
2925 set when the code generation cannot continue. */
2926
2927 edge
2928 translate_isl_ast_to_gimple::copy_bb_and_scalar_dependences (basic_block bb,
2929 edge next_e,
2930 vec<tree> iv_map)
2931 {
2932 int num_phis = number_of_phi_nodes (bb);
2933
2934 if (region->copied_bb_map->get (bb))
2935 {
2936 /* FIXME: we should be able to handle phi nodes with args coming from
2937 outside the region. */
2938 if (num_phis)
2939 {
2940 codegen_error = true;
2941 return NULL;
2942 }
2943 }
2944
2945 basic_block new_bb = NULL;
2946 if (bb_contains_loop_close_phi_nodes (bb))
2947 {
2948 if (dump_file)
2949 fprintf (dump_file, "[codegen] bb_%d contains close phi nodes.\n",
2950 bb->index);
2951
2952 edge e = edge_for_new_close_phis (bb);
2953 if (!e)
2954 {
2955 codegen_error = true;
2956 return NULL;
2957 }
2958
2959 basic_block phi_bb = e->dest;
2960
2961 if (!bb_contains_loop_close_phi_nodes (phi_bb) || !single_succ_p (phi_bb))
2962 phi_bb = split_edge (e);
2963
2964 gcc_assert (single_pred_edge (phi_bb)->src->loop_father
2965 != single_pred_edge (phi_bb)->dest->loop_father);
2966
2967 if (!copy_loop_close_phi_nodes (bb, phi_bb))
2968 {
2969 codegen_error = true;
2970 return NULL;
2971 }
2972
2973 if (e == next_e)
2974 new_bb = phi_bb;
2975 else
2976 new_bb = split_edge (next_e);
2977 }
2978 else
2979 {
2980 new_bb = split_edge (next_e);
2981 if (num_phis > 0 && bb_contains_loop_phi_nodes (bb))
2982 {
2983 basic_block phi_bb = next_e->dest->loop_father->header;
2984
2985 /* At this point we are unable to codegenerate while still preserving the
2986 SSA structure because the loop may have been completely unrolled and the
2987 PHIs and cross-bb scalar dependencies are untrackable w.r.t. the original
2988 code. See gfortran.dg/graphite/pr29832.f90. */
2989 if (EDGE_COUNT (bb->preds) != EDGE_COUNT (phi_bb->preds))
2990 {
2991 codegen_error = true;
2992 return NULL;
2993 }
2994
2995 /* In case isl did some loop peeling, like this:
2996
2997 S_8(0);
2998 for (int c1 = 1; c1 <= 5; c1 += 1) {
2999 S_8(c1);
3000 }
3001 S_8(6);
3002
3003 there should be no loop-phi nodes in S_8(0).
3004
3005 FIXME: We need to reason about dynamic instances of S_8, i.e., the
3006 values of all scalar variables: for the moment we instantiate only
3007 SCEV analyzable expressions on the iteration domain, and we need to
3008 extend that to reductions that cannot be analyzed by SCEV. */
3009 if (!bb_in_sese_p (phi_bb, region->if_region->true_region->region))
3010 {
3011 codegen_error = true;
3012 return NULL;
3013 }
3014
3015 if (dump_file)
3016 fprintf (dump_file, "[codegen] bb_%d contains loop phi nodes.\n",
3017 bb->index);
3018 if (!copy_loop_phi_nodes (bb, phi_bb))
3019 {
3020 codegen_error = true;
3021 return NULL;
3022 }
3023 }
3024 else if (num_phis > 0)
3025 {
3026 if (dump_file)
3027 fprintf (dump_file, "[codegen] bb_%d contains cond phi nodes.\n",
3028 bb->index);
3029
3030 basic_block phi_bb = single_pred (new_bb);
3031 loop_p loop_father = new_bb->loop_father;
3032
3033 /* Move back until we find the block with two predecessors. */
3034 while (single_pred_p (phi_bb))
3035 phi_bb = single_pred_edge (phi_bb)->src;
3036
3037 /* If a corresponding merge-point was not found, then abort codegen. */
3038 if (phi_bb->loop_father != loop_father
3039 || !bb_in_sese_p (phi_bb, region->if_region->true_region->region)
3040 || !copy_cond_phi_nodes (bb, phi_bb, iv_map))
3041 {
3042 codegen_error = true;
3043 return NULL;
3044 }
3045 }
3046 }
3047
3048 if (dump_file)
3049 fprintf (dump_file, "[codegen] copying from bb_%d to bb_%d.\n",
3050 bb->index, new_bb->index);
3051
3052 vec <basic_block> *copied_bbs = region->copied_bb_map->get (bb);
3053 if (copied_bbs)
3054 copied_bbs->safe_push (new_bb);
3055 else
3056 {
3057 vec<basic_block> bbs;
3058 bbs.create (2);
3059 bbs.safe_push (new_bb);
3060 region->copied_bb_map->put (bb, bbs);
3061 }
3062
3063 if (!graphite_copy_stmts_from_block (bb, new_bb, iv_map))
3064 {
3065 codegen_error = true;
3066 return NULL;
3067 }
3068
3069 return single_succ_edge (new_bb);
3070 }
3071
3072 /* Patch the missing arguments of the phi nodes. */
3073
3074 void
3075 translate_isl_ast_to_gimple::translate_pending_phi_nodes ()
3076 {
3077 int i;
3078 phi_rename *rename;
3079 FOR_EACH_VEC_ELT (region->incomplete_phis, i, rename)
3080 {
3081 gphi *old_phi = rename->first;
3082 gphi *new_phi = rename->second;
3083 basic_block old_bb = gimple_bb (old_phi);
3084 basic_block new_bb = gimple_bb (new_phi);
3085
3086 /* First edge is the init edge and second is the back edge. */
3087 init_back_edge_pair_t ibp_old_bb = get_edges (old_bb);
3088 init_back_edge_pair_t ibp_new_bb = get_edges (new_bb);
3089
3090 if (dump_file)
3091 {
3092 fprintf (dump_file, "[codegen] translating pending old-phi: ");
3093 print_gimple_stmt (dump_file, old_phi, 0, 0);
3094 }
3095
3096 auto_vec <tree, 1> iv_map;
3097 if (bb_contains_loop_phi_nodes (new_bb))
3098 codegen_error = !copy_loop_phi_args (old_phi, ibp_old_bb, new_phi,
3099 ibp_new_bb, false);
3100 else if (bb_contains_loop_close_phi_nodes (new_bb))
3101 codegen_error = !copy_loop_close_phi_args (old_bb, new_bb, false);
3102 else
3103 codegen_error = !copy_cond_phi_args (old_phi, new_phi, iv_map, false);
3104
3105 if (dump_file)
3106 {
3107 fprintf (dump_file, "[codegen] to new-phi: ");
3108 print_gimple_stmt (dump_file, new_phi, 0, 0);
3109 }
3110 if (codegen_error)
3111 return;
3112 }
3113 }
3114
3115 /* Add isl's parameter identifiers and corresponding trees to ivs_params. */
3116
3117 void
3118 translate_isl_ast_to_gimple::add_parameters_to_ivs_params (scop_p scop,
3119 ivs_params &ip)
3120 {
3121 sese_info_p region = scop->scop_info;
3122 unsigned nb_parameters = isl_set_dim (scop->param_context, isl_dim_param);
3123 gcc_assert (nb_parameters == region->params.length ());
3124 unsigned i;
3125 for (i = 0; i < nb_parameters; i++)
3126 {
3127 isl_id *tmp_id = isl_set_get_dim_id (scop->param_context,
3128 isl_dim_param, i);
3129 ip[tmp_id] = region->params[i];
3130 }
3131 }
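
/* Illustrative example (hypothetical names): for a scop with parameters n_4
   and m_7, param_context has two isl_dim_param dimensions; their isl_ids are
   mapped here to the trees n_4 and m_7 so that isl parameter references can
   later be translated back to GIMPLE SSA names.  */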
3132
3133
3134 /* Generates a build, which specifies the constraints on the parameters. */
3135
3136 __isl_give isl_ast_build *
3137 translate_isl_ast_to_gimple::generate_isl_context (scop_p scop)
3138 {
3139 isl_set *context_isl = isl_set_params (isl_set_copy (scop->param_context));
3140 return isl_ast_build_from_context (context_isl);
3141 }
3142
3143 /* This method is executed before the construction of a for node. */
3144 __isl_give isl_id *
3145 ast_build_before_for (__isl_keep isl_ast_build *build, void *user)
3146 {
3147 isl_union_map *dependences = (isl_union_map *) user;
3148 ast_build_info *for_info = XNEW (struct ast_build_info);
3149 isl_union_map *schedule = isl_ast_build_get_schedule (build);
3150 isl_space *schedule_space = isl_ast_build_get_schedule_space (build);
3151 int dimension = isl_space_dim (schedule_space, isl_dim_out);
3152 for_info->is_parallelizable =
3153 !carries_deps (schedule, dependences, dimension);
3154 isl_union_map_free (schedule);
3155 isl_space_free (schedule_space);
3156 isl_id *id = isl_id_alloc (isl_ast_build_get_ctx (build), "", for_info);
3157 return id;
3158 }
3159
3160 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3161
3162 /* Generate isl AST from schedule of SCOP. */
3163
3164 __isl_give isl_ast_node *
3165 translate_isl_ast_to_gimple::scop_to_isl_ast (scop_p scop)
3166 {
3167 gcc_assert (scop->transformed_schedule);
3168
3169 /* Set the separate option to reduce control flow overhead. */
3170 isl_schedule *schedule = isl_schedule_map_schedule_node_bottom_up
3171 (isl_schedule_copy (scop->transformed_schedule), set_separate_option, NULL);
3172 isl_ast_build *context_isl = generate_isl_context (scop);
3173
3174 if (flag_loop_parallelize_all)
3175 {
3176 scop_get_dependences (scop);
3177 context_isl =
3178 isl_ast_build_set_before_each_for (context_isl, ast_build_before_for,
3179 scop->dependence);
3180 }
3181
3182 isl_ast_node *ast_isl = isl_ast_build_node_from_schedule
3183 (context_isl, schedule);
3184 isl_ast_build_free (context_isl);
3185 return ast_isl;
3186 }
3187
3188 #else
3189 /* Get the maximal number of schedule dimensions in the scop SCOP. */
3190
3191 int
3192 translate_isl_ast_to_gimple::get_max_schedule_dimensions (scop_p scop)
3193 {
3194 int i;
3195 poly_bb_p pbb;
3196 int schedule_dims = 0;
3197
3198 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
3199 {
3200 int pbb_schedule_dims = isl_map_dim (pbb->transformed, isl_dim_out);
3201 if (pbb_schedule_dims > schedule_dims)
3202 schedule_dims = pbb_schedule_dims;
3203 }
3204
3205 return schedule_dims;
3206 }
3207
3208 /* Extend the schedule to NB_SCHEDULE_DIMS schedule dimensions.
3209
3210 For schedules with different dimensionality, the isl AST generator cannot
3211 define an order and will just choose one arbitrarily. The solution to this
3212 problem is to extend all schedules to the maximal number of schedule
3213 dimensions (using '0's for the remaining values). */
3214
3215 __isl_give isl_map *
3216 translate_isl_ast_to_gimple::extend_schedule (__isl_take isl_map *schedule,
3217 int nb_schedule_dims)
3218 {
3219 int tmp_dims = isl_map_dim (schedule, isl_dim_out);
3220 schedule =
3221 isl_map_add_dims (schedule, isl_dim_out, nb_schedule_dims - tmp_dims);
3222 isl_val *zero =
3223 isl_val_int_from_si (isl_map_get_ctx (schedule), 0);
3224 int i;
3225 for (i = tmp_dims; i < nb_schedule_dims; i++)
3226 {
3227 schedule
3228 = isl_map_fix_val (schedule, isl_dim_out, i, isl_val_copy (zero));
3229 }
3230 isl_val_free (zero);
3231 return schedule;
3232 }
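
/* Illustrative example (hypothetical statement name): with
   NB_SCHEDULE_DIMS == 4, a schedule such as

     S_3[i] -> [0, i]

   is extended to

     S_3[i] -> [0, i, 0, 0]

   so that all statements are ordered in a schedule space of the same
   dimension.  */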
3233
3234 /* Generates a schedule, which specifies an order used to
3235 visit elements in a domain. */
3236
3237 __isl_give isl_union_map *
3238 translate_isl_ast_to_gimple::generate_isl_schedule (scop_p scop)
3239 {
3240 int nb_schedule_dims = get_max_schedule_dimensions (scop);
3241 int i;
3242 poly_bb_p pbb;
3243 isl_union_map *schedule_isl =
3244 isl_union_map_empty (isl_set_get_space (scop->param_context));
3245
3246 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
3247 {
3248 /* Dead code elimination: when the domain of a PBB is empty,
3249 don't generate code for the PBB. */
3250 if (isl_set_is_empty (pbb->domain))
3251 continue;
3252
3253 isl_map *bb_schedule = isl_map_copy (pbb->transformed);
3254 bb_schedule = isl_map_intersect_domain (bb_schedule,
3255 isl_set_copy (pbb->domain));
3256 bb_schedule = extend_schedule (bb_schedule, nb_schedule_dims);
3257 bb_schedule = isl_map_coalesce (bb_schedule);
3258 schedule_isl
3259 = isl_union_map_union (schedule_isl,
3260 isl_union_map_from_map (bb_schedule));
3261 schedule_isl = isl_union_map_coalesce (schedule_isl);
3262 }
3263 return schedule_isl;
3264 }
3265
3266 /* Set the separate option for all dimensions.
3267 This helps to reduce control overhead. */
3268
3269 __isl_give isl_ast_build *
3270 translate_isl_ast_to_gimple::set_options (__isl_take isl_ast_build *control,
3271 __isl_keep isl_union_map *schedule)
3272 {
3273 isl_ctx *ctx = isl_union_map_get_ctx (schedule);
3274 isl_space *range_space = isl_space_set_alloc (ctx, 0, 1);
3275 range_space =
3276 isl_space_set_tuple_name (range_space, isl_dim_set, "separate");
3277 isl_union_set *range =
3278 isl_union_set_from_set (isl_set_universe (range_space));
3279 isl_union_set *domain = isl_union_map_range (isl_union_map_copy (schedule));
3280 domain = isl_union_set_universe (domain);
3281 isl_union_map *options = isl_union_map_from_domain_and_range (domain, range);
3282 return isl_ast_build_set_options (control, options);
3283 }
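
/* Illustrative sketch: the options map built above has the shape

     { [c0, c1, ...] -> separate[x] }

   over the universe of the schedule space; it requests the "separate" code
   generation option for every dimension, which is what reduces the control
   overhead mentioned above.  */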
3284
3285 /* Generate isl AST from schedule of SCOP. Also, collects IVS_PARAMS in IP. */
3286
3287 __isl_give isl_ast_node *
3288 translate_isl_ast_to_gimple::scop_to_isl_ast (scop_p scop, ivs_params &ip)
3289 {
3290 /* Generate loop upper bounds that consist of the current loop iterator, an
3291 operator (< or <=) and an expression not involving the iterator. If this
3292 option is not set, then the current loop iterator may appear several times
3293 in the upper bound. See the isl manual for more details. */
3294 isl_options_set_ast_build_atomic_upper_bound (scop->isl_context, true);
3295
3296 add_parameters_to_ivs_params (scop, ip);
3297 isl_union_map *schedule_isl = generate_isl_schedule (scop);
3298 isl_ast_build *context_isl = generate_isl_context (scop);
3299 context_isl = set_options (context_isl, schedule_isl);
3300 if (flag_loop_parallelize_all)
3301 {
3302 isl_union_map *dependence = scop_get_dependences (scop);
3303 context_isl =
3304 isl_ast_build_set_before_each_for (context_isl, ast_build_before_for,
3305 dependence);
3306 }
3307
3308 isl_ast_node *ast_isl = isl_ast_build_ast_from_schedule (context_isl,
3309 schedule_isl);
3310 if (scop->schedule)
3311 {
3312 isl_schedule_free (scop->schedule);
3313 scop->schedule = NULL;
3314 }
3315
3316 isl_ast_build_free (context_isl);
3317 return ast_isl;
3318 }
3319 #endif
3320
3321 /* Copy def from sese REGION to the newly created TO_REGION. TR is defined by
3322 DEF_STMT. GSI points to the entry basic block of TO_REGION. */
3323
3324 static void
3325 copy_def (tree tr, gimple *def_stmt, sese_info_p region, sese_info_p to_region,
3326 gimple_stmt_iterator *gsi)
3327 {
3328 if (!defined_in_sese_p (tr, region->region))
3329 return;
3330
3331 ssa_op_iter iter;
3332 use_operand_p use_p;
3333 FOR_EACH_SSA_USE_OPERAND (use_p, def_stmt, iter, SSA_OP_USE)
3334 {
3335 tree use_tr = USE_FROM_PTR (use_p);
3336
3337 /* Do not copy parameters that have been generated in the header of the
3338 scop. */
3339 if (region->parameter_rename_map->get(use_tr))
3340 continue;
3341
3342 gimple *def_of_use = SSA_NAME_DEF_STMT (use_tr);
3343 if (!def_of_use)
3344 continue;
3345
3346 copy_def (use_tr, def_of_use, region, to_region, gsi);
3347 }
3348
3349 gimple *copy = gimple_copy (def_stmt);
3350 gsi_insert_after (gsi, copy, GSI_NEW_STMT);
3351
3352 /* Create new names for all the definitions created by COPY and
3353 add replacement mappings for each new name. */
3354 def_operand_p def_p;
3355 ssa_op_iter op_iter;
3356 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
3357 {
3358 tree old_name = DEF_FROM_PTR (def_p);
3359 tree new_name = create_new_def_for (old_name, copy, def_p);
3360 region->parameter_rename_map->put(old_name, new_name);
3361 }
3362
3363 update_stmt (copy);
3364 }
3365
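/* Copy to the entry of TO_REGION the definitions of those parameters of
   REGION that are themselves defined inside REGION, using copy_def. */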
3366 static void
3367 copy_internal_parameters (sese_info_p region, sese_info_p to_region)
3368 {
3369 /* For all the parameters whose definition is in the if_region->false_region,
3370 insert code in the true_region (if_region->true_region->entry). */
3371
3372 int i;
3373 tree tr;
3374 gimple_stmt_iterator gsi = gsi_start_bb(to_region->region.entry->dest);
3375
3376 FOR_EACH_VEC_ELT (region->params, i, tr)
3377 {
3378 /* copy_def returns early when the definition is not in the region. */
3379 gimple *def_stmt = SSA_NAME_DEF_STMT (tr);
3380 if (def_stmt)
3381 copy_def (tr, def_stmt, region, to_region, &gsi);
3382 }
3383 }
3384
3385 /* GIMPLE Loop Generator: generates loops in GIMPLE form for the given
3386 SCOP from its isl AST. Return true if code generation succeeded.
3387
3388 FIXME: This is not yet a full implementation of the code generator
3389 with isl ASTs. Generation of GIMPLE code has to be completed. */
3390
3391 bool
3392 graphite_regenerate_ast_isl (scop_p scop)
3393 {
3394 sese_info_p region = scop->scop_info;
3395 translate_isl_ast_to_gimple t (region);
3396
3397 ifsese if_region = NULL;
3398 isl_ast_node *root_node;
3399 ivs_params ip;
3400
3401 timevar_push (TV_GRAPHITE_CODE_GEN);
3402 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3403 t.add_parameters_to_ivs_params (scop, ip);
3404 root_node = t.scop_to_isl_ast (scop);
3405 #else
3406 root_node = t.scop_to_isl_ast (scop, ip);
3407 #endif
3408
3409 if (dump_file && (dump_flags & TDF_DETAILS))
3410 {
3411 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3412 fprintf (dump_file, "[scheduler] original schedule:\n");
3413 print_isl_schedule (dump_file, scop->original_schedule);
3414 fprintf (dump_file, "[scheduler] isl transformed schedule:\n");
3415 print_isl_schedule (dump_file, scop->transformed_schedule);
3416
3417 fprintf (dump_file, "[scheduler] original ast:\n");
3418 print_schedule_ast (dump_file, scop->original_schedule, scop);
3419 #endif
3420 fprintf (dump_file, "[scheduler] AST generated by isl:\n");
3421 print_isl_ast (dump_file, root_node);
3422 }
3423
3424 recompute_all_dominators ();
3425 graphite_verify ();
3426
3427 if_region = move_sese_in_condition (region);
3428 region->if_region = if_region;
3429 recompute_all_dominators ();
3430
3431 loop_p context_loop = region->region.entry->src->loop_father;
3432
3433 /* Copy all the parameters which are defined in the region. */
3434 copy_internal_parameters(if_region->false_region, if_region->true_region);
3435
3436 edge e = single_succ_edge (if_region->true_region->region.entry->dest);
3437 basic_block bb = split_edge (e);
3438
3439 /* Update the true_region exit edge. */
3440 region->if_region->true_region->region.exit = single_succ_edge (bb);
3441
3442 t.translate_isl_ast (context_loop, root_node, e, ip);
3443 if (t.codegen_error_p ())
3444 {
3445 if (dump_file)
3446 fprintf (dump_file, "codegen error: "
3447 "reverting back to the original code.\n");
3448 set_ifsese_condition (if_region, integer_zero_node);
3449 }
3450 else
3451 {
3452 t.translate_pending_phi_nodes ();
3453 if (!t.codegen_error_p ())
3454 {
3455 sese_insert_phis_for_liveouts (region,
3456 if_region->region->region.exit->src,
3457 if_region->false_region->region.exit,
3458 if_region->true_region->region.exit);
3459 mark_virtual_operands_for_renaming (cfun);
3460 update_ssa (TODO_update_ssa);
3461
3462
3463 graphite_verify ();
3464 scev_reset ();
3465 recompute_all_dominators ();
3466 graphite_verify ();
3467
3468 if (dump_file)
3469 fprintf (dump_file, "[codegen] isl AST to Gimple succeeded.\n");
3470 }
3471 else
3472 {
3473 if (dump_file)
3474 fprintf (dump_file, "[codegen] unsuccessful in translating"
3475 " pending phis, reverting back to the original code.\n");
3476 set_ifsese_condition (if_region, integer_zero_node);
3477 }
3478 }
3479
3480 free (if_region->true_region);
3481 free (if_region->region);
3482 free (if_region);
3483
3484 ivs_params_clear (ip);
3485 isl_ast_node_free (root_node);
3486 timevar_pop (TV_GRAPHITE_CODE_GEN);
3487
3488 if (dump_file && (dump_flags & TDF_DETAILS))
3489 {
3490 loop_p loop;
3491 int num_no_dependency = 0;
3492
3493 FOR_EACH_LOOP (loop, 0)
3494 if (loop->can_be_parallel)
3495 num_no_dependency++;
3496
3497 fprintf (dump_file, "%d loops carried no dependency.\n",
3498 num_no_dependency);
3499 }
3500
3501 return !t.codegen_error_p ();
3502 }
3503
3504 #endif /* HAVE_isl */