1 /* Translation of isl AST to Gimple.
2 Copyright (C) 2014-2016 Free Software Foundation, Inc.
3 Contributed by Roman Gareev <gareevroman@gmail.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define USES_ISL
22
23 #include "config.h"
24
25 #ifdef HAVE_isl
26
27 #include "system.h"
28 #include "coretypes.h"
29 #include "backend.h"
30 #include "cfghooks.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "params.h"
34 #include "fold-const.h"
35 #include "gimple-fold.h"
36 #include "gimple-iterator.h"
37 #include "gimplify.h"
38 #include "gimplify-me.h"
39 #include "tree-eh.h"
40 #include "tree-ssa-loop.h"
41 #include "tree-ssa-operands.h"
42 #include "tree-ssa-propagate.h"
43 #include "tree-pass.h"
44 #include "cfgloop.h"
45 #include "tree-data-ref.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-scalar-evolution.h"
48 #include "gimple-ssa.h"
49 #include "tree-phinodes.h"
50 #include "tree-into-ssa.h"
51 #include "ssa-iterators.h"
52 #include "tree-cfg.h"
53 #include "gimple-pretty-print.h"
54 #include "cfganal.h"
55 #include "value-prof.h"
56 #include "graphite.h"
57 #include <map>
58
59 /* We always try to use signed 128 bit types, but fall back to smaller types
60 in case a platform does not provide types of these sizes. In the future we
61 should use isl to derive the optimal type for each subexpression. */
62
63 static int max_mode_int_precision =
64 GET_MODE_PRECISION (mode_for_size (MAX_FIXED_MODE_SIZE, MODE_INT, 0));
65 static int graphite_expression_type_precision = 128 <= max_mode_int_precision ?
66 128 : max_mode_int_precision;
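/* For example (illustrative, target-dependent): on a target whose widest
   fixed-size integer mode has 64 bits, max_mode_int_precision is 64 and the
   expressions generated by Graphite use a 64-bit type; when a 128-bit mode
   is available, a signed 128-bit type is used instead.  */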
67
68 struct ast_build_info
69 {
70 ast_build_info()
71 : is_parallelizable(false)
72 { }
73 bool is_parallelizable;
74 };
75
76 /* Converts a GMP constant VAL to a tree and returns it. */
77
78 static tree
79 gmp_cst_to_tree (tree type, mpz_t val)
80 {
81 tree t = type ? type : integer_type_node;
82 mpz_t tmp;
83
84 mpz_init (tmp);
85 mpz_set (tmp, val);
86 wide_int wi = wi::from_mpz (t, tmp, true);
87 mpz_clear (tmp);
88
89 return wide_int_to_tree (t, wi);
90 }
91
92 /* Verifies properties that GRAPHITE should maintain during translation. */
93
94 static inline void
95 graphite_verify (void)
96 {
97 checking_verify_loop_structure ();
98 checking_verify_loop_closed_ssa (true);
99 }
100
101 /* IVS_PARAMS maps isl's scattering and parameter identifiers
102 to corresponding trees. */
103
104 typedef std::map<isl_id *, tree> ivs_params;
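/* Illustration (names are only examples): after translating an isl AST loop
   "for (c1 = ...)", the isl_id of "c1" maps to the Gimple induction variable
   created for that loop, and the isl_id of a scop parameter such as "P_7"
   maps to the SSA name holding that parameter's value.  */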
105
106 /* Free all memory allocated for isl's identifiers. */
107
108 static void ivs_params_clear (ivs_params &ip)
109 {
110 std::map<isl_id *, tree>::iterator it;
111 for (it = ip.begin ();
112 it != ip.end (); it++)
113 {
114 isl_id_free (it->first);
115 }
116 }
117
118 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
119
120 /* Set the "separate" option for the schedule node. */
121
122 static isl_schedule_node *
123 set_separate_option (__isl_take isl_schedule_node *node, void *user)
124 {
125 if (user)
126 return node;
127
128 if (isl_schedule_node_get_type (node) != isl_schedule_node_band)
129 return node;
130
131 /* Set the "separate" option unless it was already set to another option. */
132 if (isl_schedule_node_band_member_get_ast_loop_type (node, 0)
133 == isl_ast_loop_default)
134 return isl_schedule_node_band_member_set_ast_loop_type
135 (node, 0, isl_ast_loop_separate);
136
137 return node;
138 }
139
140 /* Print SCHEDULE under an AST form on file F. */
141
142 void
143 print_schedule_ast (FILE *f, __isl_keep isl_schedule *schedule, scop_p scop)
144 {
145 isl_set *set = isl_set_params (isl_set_copy (scop->param_context));
146 isl_ast_build *context = isl_ast_build_from_context (set);
147 isl_ast_node *ast
148 = isl_ast_build_node_from_schedule (context, isl_schedule_copy (schedule));
149 isl_ast_build_free (context);
150 print_isl_ast (f, ast);
151 isl_ast_node_free (ast);
152 }
153
154 DEBUG_FUNCTION void
155 debug_schedule_ast (__isl_keep isl_schedule *s, scop_p scop)
156 {
157 print_schedule_ast (stderr, s, scop);
158 }
159
160 #endif
161
162 enum phi_node_kind
163 {
164 unknown_phi,
165 loop_phi,
166 close_phi,
167 cond_phi
168 };
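/* Informal summary of the phi node kinds handled below (a sketch, names and
   edge labels are only illustrative):
   - loop_phi: a phi in a loop header merging the init edge and the back
     edge, e.g. "i_1 = PHI <0 (preheader), i_6 (latch)>";
   - close_phi: a single-argument phi just after the loop exit, required by
     loop-closed SSA, e.g. "x_2 = PHI <x_1 (loop exit)>";
   - cond_phi: a phi at the merge point of an if, with two forward
     predecessors and no back edge;
   - unknown_phi: none of the above.  */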
169
170 class translate_isl_ast_to_gimple
171 {
172 public:
173 translate_isl_ast_to_gimple (sese_info_p r)
174 : region (r), codegen_error (false)
175 { }
176
177 /* Translates an isl AST node NODE to GCC representation in the
178 context of a SESE. */
179 edge translate_isl_ast (loop_p context_loop, __isl_keep isl_ast_node *node,
180 edge next_e, ivs_params &ip);
181
182 /* Translates an isl_ast_node_for to Gimple. */
183 edge translate_isl_ast_node_for (loop_p context_loop,
184 __isl_keep isl_ast_node *node,
185 edge next_e, ivs_params &ip);
186
187 /* Create the loop for an isl_ast_node_for.
188
189 - NEXT_E is the edge where new generated code should be attached. */
190 edge translate_isl_ast_for_loop (loop_p context_loop,
191 __isl_keep isl_ast_node *node_for,
192 edge next_e,
193 tree type, tree lb, tree ub,
194 ivs_params &ip);
195
196 /* Translates an isl_ast_node_if to Gimple. */
197 edge translate_isl_ast_node_if (loop_p context_loop,
198 __isl_keep isl_ast_node *node,
199 edge next_e, ivs_params &ip);
200
201 /* Translates an isl_ast_node_user to Gimple.
202
203 FIXME: We should remove iv_map.create (loop->num + 1), if it is
204 possible. */
205 edge translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
206 edge next_e, ivs_params &ip);
207
208 /* Translates an isl_ast_node_block to Gimple. */
209 edge translate_isl_ast_node_block (loop_p context_loop,
210 __isl_keep isl_ast_node *node,
211 edge next_e, ivs_params &ip);
212
213 /* Converts a unary isl_ast_expr_op expression E to a GCC expression tree of
214 type TYPE. */
215 tree unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
216 ivs_params &ip);
217
218 /* Converts a binary isl_ast_expr_op expression E to a GCC expression tree of
219 type TYPE. */
220 tree binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
221 ivs_params &ip);
222
223 /* Converts a ternary isl_ast_expr_op expression E to a GCC expression tree of
224 type TYPE. */
225 tree ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
226 ivs_params &ip);
227
228 /* Converts an isl_ast_expr_op expression E with unknown number of arguments
229 to a GCC expression tree of type TYPE. */
230 tree nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
231 ivs_params &ip);
232
233 /* Converts an isl AST expression E back to a GCC expression tree of
234 type TYPE. */
235 tree gcc_expression_from_isl_expression (tree type,
236 __isl_take isl_ast_expr *,
237 ivs_params &ip);
238
239 /* Return the tree variable that corresponds to the given isl ast identifier
240 expression (an isl_ast_expr of type isl_ast_expr_id).
241
242 FIXME: We should replace blind conversion of id's type with derivation
243 of the optimal type when we get the corresponding isl support. Blindly
244 converting type sizes may be problematic when we switch to smaller
245 types. */
246 tree gcc_expression_from_isl_ast_expr_id (tree type,
247 __isl_keep isl_ast_expr *expr_id,
248 ivs_params &ip);
249
250 /* Converts an isl_ast_expr_int expression E to a GCC expression tree of
251 type TYPE. */
252 tree gcc_expression_from_isl_expr_int (tree type,
253 __isl_take isl_ast_expr *expr);
254
255 /* Converts an isl_ast_expr_op expression E to a GCC expression tree of
256 type TYPE. */
257 tree gcc_expression_from_isl_expr_op (tree type,
258 __isl_take isl_ast_expr *expr,
259 ivs_params &ip);
260
261 /* Creates a new LOOP corresponding to isl_ast_node_for. Inserts an
262 induction variable for the new LOOP. New LOOP is attached to CFG
263 starting at ENTRY_EDGE. LOOP is inserted into the loop tree and
264 becomes the child loop of the OUTER_LOOP. NEWIVS_INDEX binds
265 isl's scattering name to the induction variable created for the
266 loop of STMT. The new induction variable is inserted in the NEWIVS
267 vector and is of type TYPE. */
268 struct loop *graphite_create_new_loop (edge entry_edge,
269 __isl_keep isl_ast_node *node_for,
270 loop_p outer, tree type,
271 tree lb, tree ub, ivs_params &ip);
272
273 /* All loops generated by create_empty_loop_on_edge have the form of
274 a post-test loop:
275
276 do
277
278 {
279 body of the loop;
280 } while (lower bound < upper bound);
281
282 We create a new if region that protects the loop from being executed
283 when the execution count is zero (lower bound > upper bound). */
284 edge graphite_create_new_loop_guard (edge entry_edge,
285 __isl_keep isl_ast_node *node_for,
286 tree *type,
287 tree *lb, tree *ub, ivs_params &ip);
288
289 /* Creates a new if region corresponding to isl's cond. */
290 edge graphite_create_new_guard (edge entry_edge,
291 __isl_take isl_ast_expr *if_cond,
292 ivs_params &ip);
293
294 /* Inserts in iv_map a tuple (OLD_LOOP->num, NEW_NAME) for the induction
295 variables of the loops around GBB in SESE.
296
297 FIXME: Instead of using a vec<tree> that maps each loop id to a possible
298 chrec, we could consider using a map<int, tree> that maps loop ids to the
299 corresponding tree expressions. */
300 void build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
301 __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
302 sese_l &region);
303
304 /* Patch the missing arguments of the phi nodes. */
305
306 void translate_pending_phi_nodes (void);
307
308 /* Add isl's parameter identifiers and corresponding trees to ivs_params. */
309
310 void add_parameters_to_ivs_params (scop_p scop, ivs_params &ip);
311
312 /* Generates a build, which specifies the constraints on the parameters. */
313
314 __isl_give isl_ast_build *generate_isl_context (scop_p scop);
315
316 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
317 /* Generate isl AST from schedule of SCOP. */
318 __isl_give isl_ast_node * scop_to_isl_ast (scop_p scop);
319 #else
320 /* Get the maximal number of schedule dimensions in the scop SCOP. */
321 int get_max_schedule_dimensions (scop_p scop);
322
323 /* Extend the schedule to NB_SCHEDULE_DIMS schedule dimensions.
324
325 For schedules with different dimensionality, the isl AST generator cannot
326 define an order and will just randomly choose an order. The solution to
327 this problem is to extend all schedules to the maximal number of schedule
328 dimensions (using '0's for the remaining values). */
329 __isl_give isl_map *extend_schedule (__isl_take isl_map *schedule,
330 int nb_schedule_dims);
331
332 /* Generates a schedule, which specifies an order used to
333 visit elements in a domain. */
334 __isl_give isl_union_map *generate_isl_schedule (scop_p scop);
335
336 /* Set the separate option for all dimensions.
337 This helps to reduce control overhead. */
338 __isl_give isl_ast_build *set_options (__isl_take isl_ast_build *control,
339 __isl_keep isl_union_map *schedule);
340
341 /* Generate isl AST from schedule of SCOP. Also, collects IVS_PARAMS in
342 IP. */
343 __isl_give isl_ast_node *scop_to_isl_ast (scop_p scop, ivs_params &ip);
344
345 /* Prints NODE to FILE. */
346 void print_isl_ast_node (FILE *file, __isl_keep isl_ast_node *node,
347 __isl_keep isl_ctx *ctx) const
348 {
349 isl_printer *prn = isl_printer_to_file (ctx, file);
350 prn = isl_printer_set_output_format (prn, ISL_FORMAT_C);
351 prn = isl_printer_print_ast_node (prn, node);
352 prn = isl_printer_print_str (prn, "\n");
353 isl_printer_free (prn);
354 }
355 #endif
356
357 /* Return true if RENAME (defined in BB) is a valid use in NEW_BB. The
358 definition should flow into use, and the use should respect the loop-closed
359 SSA form. */
360
361 bool is_valid_rename (tree rename, basic_block def_bb, basic_block use_bb,
362 phi_node_kind, tree old_name, basic_block old_bb) const;
363
364 /* Returns the expression associated to OLD_NAME (which is used in OLD_BB), in
365 NEW_BB from RENAME_MAP. PHI_KIND determines the kind of phi node in which
366 OLD_NAME is renamed. */
367
368 tree get_rename (basic_block new_bb, tree old_name,
369 basic_block old_bb, phi_node_kind) const;
370
371 /* For ops which are scev_analyzeable, we can regenerate a new name from
372 its scalar evolution around LOOP. */
373
374 tree get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
375 basic_block new_bb, basic_block old_bb,
376 vec<tree> iv_map);
377
378 /* Returns a basic block that could correspond to where a constant was defined
379 in the original code. In the original code OLD_BB had the definition; we
380 need to find which of the copies of OLD_BB in the new region the
381 definition should correspond to in order to reach BB. */
382
383 basic_block get_def_bb_for_const (basic_block bb, basic_block old_bb) const;
384
385 /* Get the new name of OP (from OLD_BB) to be used in NEW_BB. PHI_KIND
386 determines the kind of phi node in which OP is renamed. */
387
388 tree get_new_name (basic_block new_bb, tree op,
389 basic_block old_bb, phi_node_kind) const;
390
391 /* Collect all the operands of NEW_EXPR by recursively visiting each
392 operand. */
393
394 void collect_all_ssa_names (tree new_expr, vec<tree> *vec_ssa);
395
396 /* Copy the PHI arguments from OLD_PHI to the NEW_PHI. The arguments to
397 NEW_PHI must be found unless they can be POSTPONEd for later. */
398
399 bool copy_loop_phi_args (gphi *old_phi, init_back_edge_pair_t &ibp_old_bb,
400 gphi *new_phi, init_back_edge_pair_t &ibp_new_bb,
401 bool postpone);
402
403 /* Copy loop phi nodes from BB to NEW_BB. */
404
405 bool copy_loop_phi_nodes (basic_block bb, basic_block new_bb);
406
407 /* Add phi nodes to all merge points of all the diamonds enclosing the loop of
408 the close phi node PHI. */
409
410 bool add_close_phis_to_merge_points (gphi *old_phi, gphi *new_phi,
411 tree default_value);
412
413 tree add_close_phis_to_outer_loops (tree last_merge_name, edge merge_e,
414 gimple *old_close_phi);
415
416 /* Copy all the loop-close phi args from BB to NEW_BB. */
417
418 bool copy_loop_close_phi_args (basic_block old_bb, basic_block new_bb,
419 bool postpone);
420
421 /* Copy loop close phi nodes from BB to NEW_BB. */
422
423 bool copy_loop_close_phi_nodes (basic_block old_bb, basic_block new_bb);
424
425 /* Copy the arguments of cond-phi node PHI, to NEW_PHI in the codegenerated
426 region. If postpone is true and it isn't possible to copy any arg of PHI,
427 the PHI is added to the REGION->INCOMPLETE_PHIS to be codegenerated later.
428 Returns false if the copying was unsuccessful. */
429
430 bool copy_cond_phi_args (gphi *phi, gphi *new_phi, vec<tree> iv_map,
431 bool postpone);
432
433 /* Copy cond phi nodes from BB to NEW_BB. A cond-phi node is a phi node in
434 a basic block with two predecessors, neither of which is a back
435 edge. */
436
437 bool copy_cond_phi_nodes (basic_block bb, basic_block new_bb,
438 vec<tree> iv_map);
439
440 /* Duplicates the statements of basic block BB into basic block NEW_BB
441 and computes the new induction variables according to the IV_MAP. */
442
443 bool graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
444 vec<tree> iv_map);
445
446 /* Copies BB and includes in the copied BB all the statements that can
447 be reached following the use-def chains from the memory accesses,
448 and returns the next edge following this new block. */
449
450 edge copy_bb_and_scalar_dependences (basic_block bb, edge next_e,
451 vec<tree> iv_map);
452
453 /* Given a basic block containing close-phi nodes, returns the edge where a
454 copy of the close-phi nodes should be inserted. All the uses in the close
455 phis should come from a single loop, otherwise it returns NULL. */
456 edge edge_for_new_close_phis (basic_block bb);
457
458 /* Add NEW_NAME as the ARGNUM-th arg of NEW_PHI which is in NEW_BB.
459 DOMINATING_PRED is the predecessor basic block of OLD_BB which dominates
460 the other pred of OLD_BB as well. If no such basic block exists then it is
461 NULL. NON_DOMINATING_PRED is a pred which does not dominate OLD_BB, it
462 cannot be NULL.
463
464 Case1: OLD_BB->preds {BB1, BB2} and BB1 does not dominate BB2 and vice
465 versa. In this case DOMINATING_PRED = NULL.
466
467 Case2: OLD_BB->preds {BB1, BB2} and BB1 dominates BB2.
468
469 Returns true on successful copy of the args, false otherwise. */
470
471 bool add_phi_arg_for_new_expr (tree old_phi_args[2], tree new_phi_args[2],
472 edge old_bb_dominating_edge,
473 edge old_bb_non_dominating_edge,
474 gphi *phi, gphi *new_phi,
475 basic_block new_bb);
476
477 /* Renames the scalar uses of the statement COPY, using the substitution map
478 RENAME_MAP, inserting the gimplification code at GSI_TGT, for the
479 translation REGION, with the original copied statement in LOOP, and using
480 the induction variable renaming map IV_MAP. Returns true when something
481 has been renamed. */
482
483 bool rename_uses (gimple *copy, gimple_stmt_iterator *gsi_tgt,
484 basic_block old_bb, loop_p loop, vec<tree> iv_map);
485
486 /* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR).
487 When OLD_NAME and EXPR are the same, no rename is registered. */
488
489 void set_rename (tree old_name, tree expr);
490
491 /* Create new names for all the definitions created by COPY and add
492 replacement mappings for each new name. */
493
494 void set_rename_for_each_def (gimple *stmt);
495
496 /* Insert each statement from SEQ at its earliest insertion point. */
497
498 void gsi_insert_earliest (gimple_seq seq);
499
500 /* Rename all the operands of NEW_EXPR by recursively visiting each
501 operand. */
502
503 tree rename_all_uses (tree new_expr, basic_block new_bb, basic_block old_bb);
504
505 bool codegen_error_p () const
506 { return codegen_error; }
507
508 /* Return true when OP is a constant tree. */
509
510 bool is_constant (tree op) const
511 {
512 return TREE_CODE (op) == INTEGER_CST
513 || TREE_CODE (op) == REAL_CST
514 || TREE_CODE (op) == COMPLEX_CST
515 || TREE_CODE (op) == VECTOR_CST;
516 }
517
518 private:
519 /* The region to be translated. */
520 sese_info_p region;
521
522 /* This flag is set when an error occurred during the translation of isl AST
523 to Gimple. */
524 bool codegen_error;
525
526 /* A vector of all the edges at if_condition merge points. */
527 auto_vec<edge, 2> merge_points;
528 };
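/* Overview (informal sketch of the flow implemented below): translate_isl_ast
   walks the isl AST and dispatches on the node type.  for-nodes create a
   guard plus an empty loop (graphite_create_new_loop_guard and
   graphite_create_new_loop), if-nodes create an if region, block-nodes
   translate their children in sequence, and user-nodes copy the original
   basic block into the new region (copy_bb_and_scalar_dependences) while
   renaming SSA names.  Whenever something cannot be translated,
   codegen_error is set and the generated region is discarded later.  */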
529
530 /* Return the tree variable that corresponds to the given isl ast identifier
531 expression (an isl_ast_expr of type isl_ast_expr_id).
532
533 FIXME: We should replace blind conversion of id's type with derivation
534 of the optimal type when we get the corresponding isl support. Blindly
535 converting type sizes may be problematic when we switch to smaller
536 types. */
537
538 tree
539 translate_isl_ast_to_gimple::
540 gcc_expression_from_isl_ast_expr_id (tree type,
541 __isl_take isl_ast_expr *expr_id,
542 ivs_params &ip)
543 {
544 gcc_assert (isl_ast_expr_get_type (expr_id) == isl_ast_expr_id);
545 isl_id *tmp_isl_id = isl_ast_expr_get_id (expr_id);
546 std::map<isl_id *, tree>::iterator res;
547 res = ip.find (tmp_isl_id);
548 isl_id_free (tmp_isl_id);
549 gcc_assert (res != ip.end () &&
550 "Could not map isl_id to tree expression");
551 isl_ast_expr_free (expr_id);
552 tree t = res->second;
553 tree *val = region->parameter_rename_map->get(t);
554
555 if (!val)
556 val = &t;
557 return fold_convert (type, *val);
558 }
559
560 /* Converts an isl_ast_expr_int expression E to a GCC expression tree of
561 type TYPE. */
562
563 tree
564 translate_isl_ast_to_gimple::
565 gcc_expression_from_isl_expr_int (tree type, __isl_take isl_ast_expr *expr)
566 {
567 gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_int);
568 isl_val *val = isl_ast_expr_get_val (expr);
569 mpz_t val_mpz_t;
570 mpz_init (val_mpz_t);
571 tree res;
572 if (isl_val_get_num_gmp (val, val_mpz_t) == -1)
573 res = NULL_TREE;
574 else
575 res = gmp_cst_to_tree (type, val_mpz_t);
576 isl_val_free (val);
577 isl_ast_expr_free (expr);
578 mpz_clear (val_mpz_t);
579 return res;
580 }
581
582 /* Converts a binary isl_ast_expr_op expression E to a GCC expression tree of
583 type TYPE. */
584
585 tree
586 translate_isl_ast_to_gimple::
587 binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
588 {
589 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
590 tree tree_lhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
591 arg_expr = isl_ast_expr_get_op_arg (expr, 1);
592 tree tree_rhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
593
594 enum isl_ast_op_type expr_type = isl_ast_expr_get_op_type (expr);
595 isl_ast_expr_free (expr);
596
597 if (codegen_error_p ())
598 return NULL_TREE;
599
600 switch (expr_type)
601 {
602 case isl_ast_op_add:
603 return fold_build2 (PLUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);
604
605 case isl_ast_op_sub:
606 return fold_build2 (MINUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);
607
608 case isl_ast_op_mul:
609 return fold_build2 (MULT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
610
611 case isl_ast_op_div:
612 /* As isl operates on arbitrary precision numbers, we may end up with
613 division by 2^64 that is folded to 0. */
614 if (integer_zerop (tree_rhs_expr))
615 {
616 codegen_error = true;
617 return NULL_TREE;
618 }
619 return fold_build2 (EXACT_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
620
621 case isl_ast_op_pdiv_q:
622 /* As isl operates on arbitrary precision numbers, we may end up with
623 division by 2^64 that is folded to 0. */
624 if (integer_zerop (tree_rhs_expr))
625 {
626 codegen_error = true;
627 return NULL_TREE;
628 }
629 return fold_build2 (TRUNC_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
630
631 #if HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
632 /* isl 0.15 or later. */
633 case isl_ast_op_zdiv_r:
634 #endif
635 case isl_ast_op_pdiv_r:
636 /* As isl operates on arbitrary precision numbers, we may end up with
637 division by 2^64 that is folded to 0. */
638 if (integer_zerop (tree_rhs_expr))
639 {
640 codegen_error = true;
641 return NULL_TREE;
642 }
643 return fold_build2 (TRUNC_MOD_EXPR, type, tree_lhs_expr, tree_rhs_expr);
644
645 case isl_ast_op_fdiv_q:
646 /* As isl operates on arbitrary precision numbers, we may end up with
647 division by 2^64 that is folded to 0. */
648 if (integer_zerop (tree_rhs_expr))
649 {
650 codegen_error = true;
651 return NULL_TREE;
652 }
653 return fold_build2 (FLOOR_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);
654
655 case isl_ast_op_and:
656 return fold_build2 (TRUTH_ANDIF_EXPR, type,
657 tree_lhs_expr, tree_rhs_expr);
658
659 case isl_ast_op_or:
660 return fold_build2 (TRUTH_ORIF_EXPR, type, tree_lhs_expr, tree_rhs_expr);
661
662 case isl_ast_op_eq:
663 return fold_build2 (EQ_EXPR, type, tree_lhs_expr, tree_rhs_expr);
664
665 case isl_ast_op_le:
666 return fold_build2 (LE_EXPR, type, tree_lhs_expr, tree_rhs_expr);
667
668 case isl_ast_op_lt:
669 return fold_build2 (LT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
670
671 case isl_ast_op_ge:
672 return fold_build2 (GE_EXPR, type, tree_lhs_expr, tree_rhs_expr);
673
674 case isl_ast_op_gt:
675 return fold_build2 (GT_EXPR, type, tree_lhs_expr, tree_rhs_expr);
676
677 default:
678 gcc_unreachable ();
679 }
680 }
681
682 /* Converts a ternary isl_ast_expr_op expression E to a GCC expression tree of
683 type TYPE. */
684
685 tree
686 translate_isl_ast_to_gimple::
687 ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
688 {
689 enum isl_ast_op_type t = isl_ast_expr_get_op_type (expr);
690 gcc_assert (t == isl_ast_op_cond || t == isl_ast_op_select);
691 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
692 tree a = gcc_expression_from_isl_expression (type, arg_expr, ip);
693 arg_expr = isl_ast_expr_get_op_arg (expr, 1);
694 tree b = gcc_expression_from_isl_expression (type, arg_expr, ip);
695 arg_expr = isl_ast_expr_get_op_arg (expr, 2);
696 tree c = gcc_expression_from_isl_expression (type, arg_expr, ip);
697 isl_ast_expr_free (expr);
698
699 if (codegen_error_p ())
700 return NULL_TREE;
701
702 return fold_build3 (COND_EXPR, type, a, b, c);
703 }
704
705 /* Converts a unary isl_ast_expr_op expression E to a GCC expression tree of
706 type TYPE. */
707
708 tree
709 translate_isl_ast_to_gimple::
710 unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
711 {
712 gcc_assert (isl_ast_expr_get_op_type (expr) == isl_ast_op_minus);
713 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
714 tree tree_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
715 isl_ast_expr_free (expr);
716 return codegen_error_p () ? NULL_TREE
717 : fold_build1 (NEGATE_EXPR, type, tree_expr);
718 }
719
720 /* Converts an isl_ast_expr_op expression E with unknown number of arguments
721 to a GCC expression tree of type TYPE. */
722
723 tree
724 translate_isl_ast_to_gimple::
725 nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
726 {
727 enum tree_code op_code;
728 switch (isl_ast_expr_get_op_type (expr))
729 {
730 case isl_ast_op_max:
731 op_code = MAX_EXPR;
732 break;
733
734 case isl_ast_op_min:
735 op_code = MIN_EXPR;
736 break;
737
738 default:
739 gcc_unreachable ();
740 }
741 isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
742 tree res = gcc_expression_from_isl_expression (type, arg_expr, ip);
743
744 if (codegen_error_p ())
745 {
746 isl_ast_expr_free (expr);
747 return NULL_TREE;
748 }
749
750 int i;
751 for (i = 1; i < isl_ast_expr_get_op_n_arg (expr); i++)
752 {
753 arg_expr = isl_ast_expr_get_op_arg (expr, i);
754 tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);
755
756 if (codegen_error_p ())
757 {
758 isl_ast_expr_free (expr);
759 return NULL_TREE;
760 }
761
762 res = fold_build2 (op_code, type, res, t);
763 }
764 isl_ast_expr_free (expr);
765 return res;
766 }
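/* For instance (illustrative), an isl expression min (a, b, c) is folded left
   to right by the loop above into MIN_EXPR (MIN_EXPR (a, b), c).  */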
767
768 /* Converts an isl_ast_expr_op expression E to a GCC expression tree of
769 type TYPE. */
770
771 tree
772 translate_isl_ast_to_gimple::
773 gcc_expression_from_isl_expr_op (tree type, __isl_take isl_ast_expr *expr,
774 ivs_params &ip)
775 {
776 if (codegen_error_p ())
777 {
778 isl_ast_expr_free (expr);
779 return NULL_TREE;
780 }
781
782 gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_op);
783 switch (isl_ast_expr_get_op_type (expr))
784 {
785 /* These isl ast expressions are not supported yet. */
786 case isl_ast_op_error:
787 case isl_ast_op_call:
788 case isl_ast_op_and_then:
789 case isl_ast_op_or_else:
790 gcc_unreachable ();
791
792 case isl_ast_op_max:
793 case isl_ast_op_min:
794 return nary_op_to_tree (type, expr, ip);
795
796 case isl_ast_op_add:
797 case isl_ast_op_sub:
798 case isl_ast_op_mul:
799 case isl_ast_op_div:
800 case isl_ast_op_pdiv_q:
801 case isl_ast_op_pdiv_r:
802 case isl_ast_op_fdiv_q:
803 #if HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
804 /* isl 0.15 or later. */
805 case isl_ast_op_zdiv_r:
806 #endif
807 case isl_ast_op_and:
808 case isl_ast_op_or:
809 case isl_ast_op_eq:
810 case isl_ast_op_le:
811 case isl_ast_op_lt:
812 case isl_ast_op_ge:
813 case isl_ast_op_gt:
814 return binary_op_to_tree (type, expr, ip);
815
816 case isl_ast_op_minus:
817 return unary_op_to_tree (type, expr, ip);
818
819 case isl_ast_op_cond:
820 case isl_ast_op_select:
821 return ternary_op_to_tree (type, expr, ip);
822
823 default:
824 gcc_unreachable ();
825 }
826
827 return NULL_TREE;
828 }
829
830 /* Converts an isl AST expression E back to a GCC expression tree of
831 type TYPE. */
832
833 tree
834 translate_isl_ast_to_gimple::
835 gcc_expression_from_isl_expression (tree type, __isl_take isl_ast_expr *expr,
836 ivs_params &ip)
837 {
838 if (codegen_error_p ())
839 {
840 isl_ast_expr_free (expr);
841 return NULL_TREE;
842 }
843
844 switch (isl_ast_expr_get_type (expr))
845 {
846 case isl_ast_expr_id:
847 return gcc_expression_from_isl_ast_expr_id (type, expr, ip);
848
849 case isl_ast_expr_int:
850 return gcc_expression_from_isl_expr_int (type, expr);
851
852 case isl_ast_expr_op:
853 return gcc_expression_from_isl_expr_op (type, expr, ip);
854
855 default:
856 gcc_unreachable ();
857 }
858
859 return NULL_TREE;
860 }
861
862 /* Creates a new LOOP corresponding to isl_ast_node_for. Inserts an
863 induction variable for the new LOOP. New LOOP is attached to CFG
864 starting at ENTRY_EDGE. LOOP is inserted into the loop tree and
865 becomes the child loop of the OUTER_LOOP. NEWIVS_INDEX binds
866 isl's scattering name to the induction variable created for the
867 loop of STMT. The new induction variable is inserted in the NEWIVS
868 vector and is of type TYPE. */
869
870 struct loop *
871 translate_isl_ast_to_gimple::
872 graphite_create_new_loop (edge entry_edge, __isl_keep isl_ast_node *node_for,
873 loop_p outer, tree type, tree lb, tree ub,
874 ivs_params &ip)
875 {
876 isl_ast_expr *for_inc = isl_ast_node_for_get_inc (node_for);
877 tree stride = gcc_expression_from_isl_expression (type, for_inc, ip);
878
879 /* To fail code generation, we generate wrong code until we discard it. */
880 if (codegen_error_p ())
881 stride = integer_zero_node;
882
883 tree ivvar = create_tmp_var (type, "graphite_IV");
884 tree iv, iv_after_increment;
885 loop_p loop = create_empty_loop_on_edge
886 (entry_edge, lb, stride, ub, ivvar, &iv, &iv_after_increment,
887 outer ? outer : entry_edge->src->loop_father);
888
889 isl_ast_expr *for_iterator = isl_ast_node_for_get_iterator (node_for);
890 isl_id *id = isl_ast_expr_get_id (for_iterator);
891 std::map<isl_id *, tree>::iterator res;
892 res = ip.find (id);
893 if (ip.count (id))
894 isl_id_free (res->first);
895 ip[id] = iv;
896 isl_ast_expr_free (for_iterator);
897 return loop;
898 }
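/* Note (descriptive): the isl iterator id of the for node (e.g. "c1", name
   only illustrative) is bound above to the fresh Gimple IV in IP, so that
   later isl expressions referring to that iterator are translated to uses of
   the new induction variable; a previous binding for the same id, if any, is
   replaced and the old key is freed.  */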
899
900 /* Create the loop for an isl_ast_node_for.
901
902 - NEXT_E is the edge where new generated code should be attached. */
903
904 edge
905 translate_isl_ast_to_gimple::
906 translate_isl_ast_for_loop (loop_p context_loop,
907 __isl_keep isl_ast_node *node_for, edge next_e,
908 tree type, tree lb, tree ub,
909 ivs_params &ip)
910 {
911 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
912 struct loop *loop = graphite_create_new_loop (next_e, node_for, context_loop,
913 type, lb, ub, ip);
914 edge last_e = single_exit (loop);
915 edge to_body = single_succ_edge (loop->header);
916 basic_block after = to_body->dest;
917
918 /* Translate the body of the loop. */
919 isl_ast_node *for_body = isl_ast_node_for_get_body (node_for);
920 next_e = translate_isl_ast (loop, for_body, to_body, ip);
921 isl_ast_node_free (for_body);
922
923 /* Early return if we failed to translate loop body. */
924 if (!next_e || codegen_error_p ())
925 return NULL;
926
927 if (next_e->dest != after)
928 redirect_edge_succ_nodup (next_e, after);
929 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
930
931 if (flag_loop_parallelize_all)
932 {
933 isl_id *id = isl_ast_node_get_annotation (node_for);
934 gcc_assert (id);
935 ast_build_info *for_info = (ast_build_info *) isl_id_get_user (id);
936 loop->can_be_parallel = for_info->is_parallelizable;
937 free (for_info);
938 isl_id_free (id);
939 }
940
941 return last_e;
942 }
943
944 /* We use this function to get the upper bound because of the form
945 isl uses to represent loops:
946
947 for (iterator = init; cond; iterator += inc)
948
949 {
950
951 ...
952
953 }
954
955 The loop condition is an arbitrary expression, which contains the
956 current loop iterator.
957
958 (e.g. iterator + 3 < B && C > iterator + A)
959
960 We have to know the upper bound of the iterator to generate a loop
961 in Gimple form. It can be obtained from the special representation
962 of the loop condition, which is generated by isl,
963 if the ast_build_atomic_upper_bound option is set. In this case,
964 isl generates a loop condition that consists of the current loop
965 iterator, an operator (< or <=), and an expression not involving
966 the iterator, which is processed and returned by this function.
967
968 (e.g. iterator <= upper-bound-expression-without-iterator) */
969
970 static __isl_give isl_ast_expr *
971 get_upper_bound (__isl_keep isl_ast_node *node_for)
972 {
973 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
974 isl_ast_expr *for_cond = isl_ast_node_for_get_cond (node_for);
975 gcc_assert (isl_ast_expr_get_type (for_cond) == isl_ast_expr_op);
976 isl_ast_expr *res;
977 switch (isl_ast_expr_get_op_type (for_cond))
978 {
979 case isl_ast_op_le:
980 res = isl_ast_expr_get_op_arg (for_cond, 1);
981 break;
982
983 case isl_ast_op_lt:
984 {
985 /* (iterator < ub) => (iterator <= ub - 1). */
986 isl_val *one =
987 isl_val_int_from_si (isl_ast_expr_get_ctx (for_cond), 1);
988 isl_ast_expr *ub = isl_ast_expr_get_op_arg (for_cond, 1);
989 res = isl_ast_expr_sub (ub, isl_ast_expr_from_val (one));
990 break;
991 }
992
993 default:
994 gcc_unreachable ();
995 }
996 isl_ast_expr_free (for_cond);
997 return res;
998 }
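/* Examples (illustrative): for an isl loop "for (c1 = 0; c1 <= n; c1 += 1)"
   this returns "n"; for "for (c1 = 0; c1 < n; c1 += 1)" it returns "n - 1".  */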
999
1000 /* All loops generated by create_empty_loop_on_edge have the form of
1001 a post-test loop:
1002
1003 do
1004
1005 {
1006 body of the loop;
1007 } while (lower bound < upper bound);
1008
1009 We create a new if region that protects the loop from being executed
1010 when the execution count is zero (lower bound > upper bound). */
1011
1012 edge
1013 translate_isl_ast_to_gimple::
1014 graphite_create_new_loop_guard (edge entry_edge,
1015 __isl_keep isl_ast_node *node_for, tree *type,
1016 tree *lb, tree *ub, ivs_params &ip)
1017 {
1018 gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
1019 tree cond_expr;
1020 edge exit_edge;
1021
1022 *type =
1023 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1024 isl_ast_expr *for_init = isl_ast_node_for_get_init (node_for);
1025 *lb = gcc_expression_from_isl_expression (*type, for_init, ip);
1026
1027 /* To fail code generation, we generate wrong code until we discard it. */
1028 if (codegen_error_p ())
1029 *lb = integer_zero_node;
1030
1031 isl_ast_expr *upper_bound = get_upper_bound (node_for);
1032 *ub = gcc_expression_from_isl_expression (*type, upper_bound, ip);
1033
1034 /* To fail code generation, we generate wrong code until we discard it. */
1035 if (codegen_error_p ())
1036 *ub = integer_zero_node;
1037
1038 /* When ub is simply a constant or a parameter, use lb <= ub. */
1039 if (TREE_CODE (*ub) == INTEGER_CST || TREE_CODE (*ub) == SSA_NAME)
1040 cond_expr = fold_build2 (LE_EXPR, boolean_type_node, *lb, *ub);
1041 else
1042 {
1043 tree one = (POINTER_TYPE_P (*type)
1044 ? convert_to_ptrofftype (integer_one_node)
1045 : fold_convert (*type, integer_one_node));
1046 /* Adding +1 and using LT_EXPR helps with loop latches that have a
1047 loop iteration count of "PARAMETER - 1". For PARAMETER == 0 this
1048 becomes 2^k-1 due to integer overflow, and the condition lb <= ub
1049 is true, even if we do not want this. However lb < ub + 1 is false,
1050 as expected. */
1051 tree ub_one = fold_build2 (POINTER_TYPE_P (*type) ? POINTER_PLUS_EXPR
1052 : PLUS_EXPR, *type, *ub, one);
1053
1054 cond_expr = fold_build2 (LT_EXPR, boolean_type_node, *lb, ub_one);
1055 }
1056
1057 if (integer_onep (cond_expr))
1058 exit_edge = entry_edge;
1059 else
1060 exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
1061
1062 return exit_edge;
1063 }
1064
1065 /* Translates an isl_ast_node_for to Gimple. */
1066
1067 edge
1068 translate_isl_ast_to_gimple::
1069 translate_isl_ast_node_for (loop_p context_loop, __isl_keep isl_ast_node *node,
1070 edge next_e, ivs_params &ip)
1071 {
1072 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_for);
1073 tree type, lb, ub;
1074 edge last_e = graphite_create_new_loop_guard (next_e, node, &type,
1075 &lb, &ub, ip);
1076
1077 if (last_e == next_e)
1078 {
1079 /* There was no guard generated. */
1080 last_e = single_succ_edge (split_edge (last_e));
1081
1082 translate_isl_ast_for_loop (context_loop, node, next_e,
1083 type, lb, ub, ip);
1084 return last_e;
1085 }
1086
1087 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
1088 merge_points.safe_push (last_e);
1089
1090 last_e = single_succ_edge (split_edge (last_e));
1091 translate_isl_ast_for_loop (context_loop, node, true_e, type, lb, ub, ip);
1092
1093 return last_e;
1094 }
1095
1096 /* Inserts in iv_map a tuple (OLD_LOOP->num, NEW_NAME) for the induction
1097 variables of the loops around GBB in SESE.
1098
1099 FIXME: Instead of using a vec<tree> that maps each loop id to a possible
1100 chrec, we could consider using a map<int, tree> that maps loop ids to the
1101 corresponding tree expressions. */
1102
1103 void
1104 translate_isl_ast_to_gimple::
1105 build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
1106 __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
1107 sese_l &region)
1108 {
1109 gcc_assert (isl_ast_expr_get_type (user_expr) == isl_ast_expr_op &&
1110 isl_ast_expr_get_op_type (user_expr) == isl_ast_op_call);
1111 int i;
1112 isl_ast_expr *arg_expr;
1113 for (i = 1; i < isl_ast_expr_get_op_n_arg (user_expr); i++)
1114 {
1115 arg_expr = isl_ast_expr_get_op_arg (user_expr, i);
1116 tree type =
1117 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1118 tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);
1119
1120 /* To fail code generation, we generate wrong code until we discard it. */
1121 if (codegen_error_p ())
1122 t = integer_zero_node;
1123
1124 loop_p old_loop = gbb_loop_at_index (gbb, region, i - 1);
1125 iv_map[old_loop->num] = t;
1126 }
1127 }
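/* Illustration (statement and iterator names are only examples): for a user
   expression of the form "S_4 (c0 + 1, c1)", operand 0 names the statement
   and operands 1..n are the new iteration values; the loop above stores each
   of them in IV_MAP at the index of the corresponding original loop, so
   iv_map[old_loop->num] later replaces that loop's induction variable.  */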
1128
1129 /* Translates an isl_ast_node_user to Gimple.
1130
1131 FIXME: We should remove iv_map.create (loop->num + 1), if it is possible. */
1132
1133 edge
1134 translate_isl_ast_to_gimple::
1135 translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
1136 edge next_e, ivs_params &ip)
1137 {
1138 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_user);
1139
1140 isl_ast_expr *user_expr = isl_ast_node_user_get_expr (node);
1141 isl_ast_expr *name_expr = isl_ast_expr_get_op_arg (user_expr, 0);
1142 gcc_assert (isl_ast_expr_get_type (name_expr) == isl_ast_expr_id);
1143
1144 isl_id *name_id = isl_ast_expr_get_id (name_expr);
1145 poly_bb_p pbb = (poly_bb_p) isl_id_get_user (name_id);
1146 gcc_assert (pbb);
1147
1148 gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);
1149
1150 isl_ast_expr_free (name_expr);
1151 isl_id_free (name_id);
1152
1153 gcc_assert (GBB_BB (gbb) != ENTRY_BLOCK_PTR_FOR_FN (cfun) &&
1154 "The entry block should not even appear within a scop");
1155
1156 const int nb_loops = number_of_loops (cfun);
1157 vec<tree> iv_map;
1158 iv_map.create (nb_loops);
1159 iv_map.safe_grow_cleared (nb_loops);
1160
1161 build_iv_mapping (iv_map, gbb, user_expr, ip, pbb->scop->scop_info->region);
1162 isl_ast_expr_free (user_expr);
1163
1164 basic_block old_bb = GBB_BB (gbb);
1165 if (dump_file)
1166 {
1167 fprintf (dump_file,
1168 "[codegen] copying from bb_%d on edge (bb_%d, bb_%d)\n",
1169 old_bb->index, next_e->src->index, next_e->dest->index);
1170 print_loops_bb (dump_file, GBB_BB (gbb), 0, 3);
1171
1172 }
1173
1174 next_e = copy_bb_and_scalar_dependences (old_bb, next_e, iv_map);
1175
1176 iv_map.release ();
1177
1178 if (codegen_error_p ())
1179 return NULL;
1180
1181 if (dump_file)
1182 {
1183 fprintf (dump_file, "[codegen] (after copy) new basic block\n");
1184 print_loops_bb (dump_file, next_e->src, 0, 3);
1185 }
1186
1187 return next_e;
1188 }
1189
1190 /* Translates an isl_ast_node_block to Gimple. */
1191
1192 edge
1193 translate_isl_ast_to_gimple::
1194 translate_isl_ast_node_block (loop_p context_loop,
1195 __isl_keep isl_ast_node *node,
1196 edge next_e, ivs_params &ip)
1197 {
1198 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_block);
1199 isl_ast_node_list *node_list = isl_ast_node_block_get_children (node);
1200 int i;
1201 for (i = 0; i < isl_ast_node_list_n_ast_node (node_list); i++)
1202 {
1203 isl_ast_node *tmp_node = isl_ast_node_list_get_ast_node (node_list, i);
1204 next_e = translate_isl_ast (context_loop, tmp_node, next_e, ip);
1205 isl_ast_node_free (tmp_node);
1206 }
1207 isl_ast_node_list_free (node_list);
1208 return next_e;
1209 }
1210
1211 /* Creates a new if region corresponding to isl's cond. */
1212
1213 edge
1214 translate_isl_ast_to_gimple::
1215 graphite_create_new_guard (edge entry_edge, __isl_take isl_ast_expr *if_cond,
1216 ivs_params &ip)
1217 {
1218 tree type =
1219 build_nonstandard_integer_type (graphite_expression_type_precision, 0);
1220 tree cond_expr = gcc_expression_from_isl_expression (type, if_cond, ip);
1221
1222 /* To fail code generation, we generate wrong code until we discard it. */
1223 if (codegen_error_p ())
1224 cond_expr = integer_zero_node;
1225
1226 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
1227 return exit_edge;
1228 }
1229
1230 /* Translates an isl_ast_node_if to Gimple. */
1231
1232 edge
1233 translate_isl_ast_to_gimple::
1234 translate_isl_ast_node_if (loop_p context_loop,
1235 __isl_keep isl_ast_node *node,
1236 edge next_e, ivs_params &ip)
1237 {
1238 gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_if);
1239 isl_ast_expr *if_cond = isl_ast_node_if_get_cond (node);
1240 edge last_e = graphite_create_new_guard (next_e, if_cond, ip);
1241 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
1242 merge_points.safe_push (last_e);
1243
1244 isl_ast_node *then_node = isl_ast_node_if_get_then (node);
1245 translate_isl_ast (context_loop, then_node, true_e, ip);
1246 isl_ast_node_free (then_node);
1247
1248 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
1249 isl_ast_node *else_node = isl_ast_node_if_get_else (node);
1250 if (isl_ast_node_get_type (else_node) != isl_ast_node_error)
1251 translate_isl_ast (context_loop, else_node, false_e, ip);
1252
1253 isl_ast_node_free (else_node);
1254 return last_e;
1255 }
1256
1257 /* Translates an isl AST node NODE to GCC representation in the
1258 context of a SESE. */
1259
1260 edge
1261 translate_isl_ast_to_gimple::translate_isl_ast (loop_p context_loop,
1262 __isl_keep isl_ast_node *node,
1263 edge next_e, ivs_params &ip)
1264 {
1265 if (codegen_error_p ())
1266 return NULL;
1267
1268 switch (isl_ast_node_get_type (node))
1269 {
1270 case isl_ast_node_error:
1271 gcc_unreachable ();
1272
1273 case isl_ast_node_for:
1274 return translate_isl_ast_node_for (context_loop, node,
1275 next_e, ip);
1276
1277 case isl_ast_node_if:
1278 return translate_isl_ast_node_if (context_loop, node,
1279 next_e, ip);
1280
1281 case isl_ast_node_user:
1282 return translate_isl_ast_node_user (node, next_e, ip);
1283
1284 case isl_ast_node_block:
1285 return translate_isl_ast_node_block (context_loop, node,
1286 next_e, ip);
1287
1288 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
1289 case isl_ast_node_mark:
1290 {
1291 isl_ast_node *n = isl_ast_node_mark_get_node (node);
1292 edge e = translate_isl_ast (context_loop, n, next_e, ip);
1293 isl_ast_node_free (n);
1294 return e;
1295 }
1296 #endif
1297
1298 default:
1299 gcc_unreachable ();
1300 }
1301 }
1302
1303 /* Return true when BB contains loop close phi nodes. A loop close phi node is
1304 at the exit of a loop and takes one argument that is the last value of the
1305 variable used outside the loop. */
1306
1307 static bool
1308 bb_contains_loop_close_phi_nodes (basic_block bb)
1309 {
1310 return single_pred_p (bb)
1311 && bb->loop_father != single_pred_edge (bb)->src->loop_father;
1312 }
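/* For example (illustrative), loop-closed SSA inserts a phi like
   "x_3 = PHI <x_2 (loop exit)>" in the single-predecessor block just after
   a loop exit edge; such blocks are what this predicate detects.  */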
1313
1314 /* Return true when BB contains loop phi nodes. A loop phi node is a phi node
1315 in the loop header, which has one init-edge and one back-edge. */
1316
1317 static bool
1318 bb_contains_loop_phi_nodes (basic_block bb)
1319 {
1320 gcc_assert (EDGE_COUNT (bb->preds) <= 2);
1321
1322 if (bb->preds->length () == 1)
1323 return false;
1324
1325 unsigned depth = loop_depth (bb->loop_father);
1326
1327 edge preds[2] = { (*bb->preds)[0], (*bb->preds)[1] };
1328
1329 if (depth > loop_depth (preds[0]->src->loop_father)
1330 || depth > loop_depth (preds[1]->src->loop_father))
1331 return true;
1332
1333 /* When one of the edges corresponds to the same loop father and the other
1334 doesn't. */
1335 if (bb->loop_father != preds[0]->src->loop_father
1336 && bb->loop_father == preds[1]->src->loop_father)
1337 return true;
1338
1339 if (bb->loop_father != preds[1]->src->loop_father
1340 && bb->loop_father == preds[0]->src->loop_father)
1341 return true;
1342
1343 return false;
1344 }
1345
1346 /* Check if USE is defined in a basic block from which the definition of USE
1347 can propagate along all paths. FIXME: Verify checks for virtual operands. */
1348
1349 static bool
1350 is_loop_closed_ssa_use (basic_block bb, tree use)
1351 {
1352 if (TREE_CODE (use) != SSA_NAME || virtual_operand_p (use))
1353 return true;
1354
1355 /* For close-phi nodes def always comes from a loop which has a back-edge. */
1356 if (bb_contains_loop_close_phi_nodes (bb))
1357 return true;
1358
1359 gimple *def = SSA_NAME_DEF_STMT (use);
1360 basic_block def_bb = gimple_bb (def);
1361 return (!def_bb
1362 || flow_bb_inside_loop_p (def_bb->loop_father, bb));
1363 }
1364
1365 /* Return the number of phi nodes in BB. */
1366
1367 static int
1368 number_of_phi_nodes (basic_block bb)
1369 {
1370 int num_phis = 0;
1371 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
1372 gsi_next (&psi))
1373 num_phis++;
1374 return num_phis;
1375 }
1376
1377 /* Returns true if BB uses NAME in one of its PHIs. */
1378
1379 static bool
1380 phi_uses_name (basic_block bb, tree name)
1381 {
1382 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
1383 gsi_next (&psi))
1384 {
1385 gphi *phi = psi.phi ();
1386 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1387 {
1388 tree use_arg = gimple_phi_arg_def (phi, i);
1389 if (use_arg == name)
1390 return true;
1391 }
1392 }
1393 return false;
1394 }
1395
1396 /* Return true if RENAME (defined in BB) is a valid use in NEW_BB. The
1397 definition should flow into use, and the use should respect the loop-closed
1398 SSA form. */
1399
1400 bool
1401 translate_isl_ast_to_gimple::
1402 is_valid_rename (tree rename, basic_block def_bb, basic_block use_bb,
1403 phi_node_kind phi_kind, tree old_name, basic_block old_bb) const
1404 {
1405 /* The def of the rename must either dominate the uses or come from a
1406 back-edge. Also the def must respect the loop closed ssa form. */
1407 if (!is_loop_closed_ssa_use (use_bb, rename))
1408 {
1409 if (dump_file)
1410 {
1411 fprintf (dump_file, "[codegen] rename not in loop closed ssa: ");
1412 print_generic_expr (dump_file, rename, 0);
1413 fprintf (dump_file, "\n");
1414 }
1415 return false;
1416 }
1417
1418 if (dominated_by_p (CDI_DOMINATORS, use_bb, def_bb))
1419 return true;
1420
1421 if (bb_contains_loop_phi_nodes (use_bb) && phi_kind == loop_phi)
1422 {
1423 /* The loop-header dominates the loop-body. */
1424 if (!dominated_by_p (CDI_DOMINATORS, def_bb, use_bb))
1425 return false;
1426
1427 /* RENAME would be used in loop-phi. */
1428 gcc_assert (number_of_phi_nodes (use_bb));
1429
1430 /* For definitions coming from back edges, we should check that
1431 old_name is used in a loop PHI node.
1432 FIXME: Verify if this is true. */
1433 if (phi_uses_name (old_bb, old_name))
1434 return true;
1435 }
1436 return false;
1437 }
1438
1439 /* Returns the expression associated to OLD_NAME (which is used in OLD_BB), in
1440 NEW_BB from RENAME_MAP. PHI_KIND determines the kind of phi node. */
1441
1442 tree
1443 translate_isl_ast_to_gimple::get_rename (basic_block new_bb,
1444 tree old_name,
1445 basic_block old_bb,
1446 phi_node_kind phi_kind) const
1447 {
1448 gcc_assert (TREE_CODE (old_name) == SSA_NAME);
1449 vec <tree> *renames = region->rename_map->get (old_name);
1450
1451 if (!renames || renames->is_empty ())
1452 return NULL_TREE;
1453
1454 if (1 == renames->length ())
1455 {
1456 tree rename = (*renames)[0];
1457 if (TREE_CODE (rename) == SSA_NAME)
1458 {
1459 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (rename));
1460 if (is_valid_rename (rename, bb, new_bb, phi_kind, old_name, old_bb)
1461 && (phi_kind == close_phi
1462 || flow_bb_inside_loop_p (bb->loop_father, new_bb)))
1463 return rename;
1464 return NULL_TREE;
1465 }
1466
1467 if (is_constant (rename))
1468 return rename;
1469
1470 return NULL_TREE;
1471 }
1472
1473 /* More than one rename corresponds to OLD_NAME. Find the rename whose
1474 definition flows into the use at NEW_BB. */
1475 int i;
1476 tree t1 = NULL_TREE, t2;
1477 basic_block t1_bb = NULL;
1478 FOR_EACH_VEC_ELT (*renames, i, t2)
1479 {
1480 basic_block t2_bb = gimple_bb (SSA_NAME_DEF_STMT (t2));
1481
1482 /* Defined in the same basic block as used. */
1483 if (t2_bb == new_bb)
1484 return t2;
1485
1486 /* NEW_BB and T2_BB are in two unrelated if-clauses. */
1487 if (!dominated_by_p (CDI_DOMINATORS, new_bb, t2_bb))
1488 continue;
1489
1490 if (!flow_bb_inside_loop_p (t2_bb->loop_father, new_bb))
1491 continue;
1492
1493 /* Compute the nearest dominator. */
1494 if (!t1 || dominated_by_p (CDI_DOMINATORS, t2_bb, t1_bb))
1495 {
1496 t1_bb = t2_bb;
1497 t1 = t2;
1498 }
1499 }
1500
1501 return t1;
1502 }
1503
1504 /* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR).
1505 When OLD_NAME and EXPR are the same, no rename is registered. */
1506
1507 void
1508 translate_isl_ast_to_gimple::set_rename (tree old_name, tree expr)
1509 {
1510 if (dump_file)
1511 {
1512 fprintf (dump_file, "[codegen] setting rename: old_name = ");
1513 print_generic_expr (dump_file, old_name, 0);
1514 fprintf (dump_file, ", new_name = ");
1515 print_generic_expr (dump_file, expr, 0);
1516 fprintf (dump_file, "\n");
1517 }
1518
1519 if (old_name == expr)
1520 return;
1521
1522 vec <tree> *renames = region->rename_map->get (old_name);
1523
1524 if (renames)
1525 renames->safe_push (expr);
1526 else
1527 {
1528 vec<tree> r;
1529 r.create (2);
1530 r.safe_push (expr);
1531 region->rename_map->put (old_name, r);
1532 }
1533
1534 tree t;
1535 int i;
1536 /* For a parameter of a scop we don't want to rename it. */
1537 FOR_EACH_VEC_ELT (region->params, i, t)
1538 if (old_name == t)
1539 region->parameter_rename_map->put(old_name, expr);
1540 }
1541
1542 /* Return an iterator to the instruction that comes last in the execution
1543 order. GSI1 and GSI2 should either belong to the same basic block, or one
1544 of their respective basic blocks should dominate the other. */
1545
1546 gimple_stmt_iterator
1547 later_of_the_two (gimple_stmt_iterator gsi1, gimple_stmt_iterator gsi2)
1548 {
1549 basic_block bb1 = gsi_bb (gsi1);
1550 basic_block bb2 = gsi_bb (gsi2);
1551
1552 /* Find the iterator which is the latest. */
1553 if (bb1 == bb2)
1554 {
1555 /* For empty basic blocks gsis point to the end of the sequence. There is
1556 no operator== defined for gimple_stmt_iterator, and gsi_next would assert
1557 for gsis not pointing to a valid statement. */
1558 gimple_stmt_iterator gsi = gsi1;
1559 do {
1560 if (gsi_stmt (gsi) == gsi_stmt (gsi2))
1561 return gsi2;
1562 gsi_next (&gsi);
1563 } while (!gsi_end_p (gsi));
1564
1565 return gsi1;
1566 }
1567
1568 /* Find the basic block closest to the basic block which defines stmt. */
1569 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
1570 return gsi1;
1571
1572 gcc_assert (dominated_by_p (CDI_DOMINATORS, bb2, bb1));
1573 return gsi2;
1574 }
1575
1576 /* Insert each statement from SEQ at its earliest insertion point. */
1577
1578 void
1579 translate_isl_ast_to_gimple::gsi_insert_earliest (gimple_seq seq)
1580 {
1581 update_modified_stmts (seq);
1582 sese_l &codegen_region = region->if_region->true_region->region;
1583 basic_block begin_bb = get_entry_bb (codegen_region);
1584
1585 /* Insert the gimple statements into a vector because gimple_seq behaves
1586 in strange ways when the stmts are inserted from it into different basic
1587 blocks one at a time. */
1588 auto_vec<gimple *, 3> stmts;
1589 for (gimple_stmt_iterator gsi = gsi_start (seq); !gsi_end_p (gsi);
1590 gsi_next (&gsi))
1591 stmts.safe_push (gsi_stmt (gsi));
1592
1593 int i;
1594 gimple *use_stmt;
1595 FOR_EACH_VEC_ELT (stmts, i, use_stmt)
1596 {
1597 gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
1598 gimple_stmt_iterator gsi_def_stmt = gsi_start_bb_nondebug (begin_bb);
1599
1600 use_operand_p use_p;
1601 ssa_op_iter op_iter;
1602 FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, op_iter, SSA_OP_USE)
1603 {
1604 /* Iterator to the current def of use_p. For function parameters or
1605 anything where def is not found, insert at the beginning of the
1606 generated region. */
1607 gimple_stmt_iterator gsi_stmt = gsi_def_stmt;
1608
1609 tree op = USE_FROM_PTR (use_p);
1610 gimple *stmt = SSA_NAME_DEF_STMT (op);
1611 if (stmt && (gimple_code (stmt) != GIMPLE_NOP))
1612 gsi_stmt = gsi_for_stmt (stmt);
1613
1614 /* For region parameters, insert at the beginning of the generated
1615 region. */
1616 if (!bb_in_sese_p (gsi_bb (gsi_stmt), codegen_region))
1617 gsi_stmt = gsi_def_stmt;
1618
1619 gsi_def_stmt = later_of_the_two (gsi_stmt, gsi_def_stmt);
1620 }
1621
1622 if (!gsi_stmt (gsi_def_stmt))
1623 {
1624 gimple_stmt_iterator gsi = gsi_after_labels (gsi_bb (gsi_def_stmt));
1625 gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
1626 }
1627 else if (gimple_code (gsi_stmt (gsi_def_stmt)) == GIMPLE_PHI)
1628 {
1629 gimple_stmt_iterator bsi
1630 = gsi_start_bb_nondebug (gsi_bb (gsi_def_stmt));
1631 /* Insert right after the PHI statements. */
1632 gsi_insert_before (&bsi, use_stmt, GSI_NEW_STMT);
1633 }
1634 else
1635 gsi_insert_after (&gsi_def_stmt, use_stmt, GSI_NEW_STMT);
1636
1637 if (dump_file)
1638 {
1639 fprintf (dump_file, "[codegen] inserting statement: ");
1640 print_gimple_stmt (dump_file, use_stmt, 0, TDF_VOPS | TDF_MEMSYMS);
1641 print_loops_bb (dump_file, gimple_bb (use_stmt), 0, 3);
1642 }
1643 }
1644 }
1645
1646 /* Collect all the operands of NEW_EXPR by recursively visiting each
1647 operand. */
1648
1649 void
1650 translate_isl_ast_to_gimple::collect_all_ssa_names (tree new_expr,
1651 vec<tree> *vec_ssa)
1652 {
1653
1654 /* Rename all uses in new_expr. */
1655 if (TREE_CODE (new_expr) == SSA_NAME)
1656 {
1657 vec_ssa->safe_push (new_expr);
1658 return;
1659 }
1660
1661 /* Iterate over SSA_NAMES in NEW_EXPR. */
1662 for (int i = 0; i < (TREE_CODE_LENGTH (TREE_CODE (new_expr))); i++)
1663 {
1664 tree op = TREE_OPERAND (new_expr, i);
1665 collect_all_ssa_names (op, vec_ssa);
1666 }
1667 }
1668
1669 /* This is an abridged version of the function copied from:
1670 tree.c:substitute_in_expr (tree exp, tree f, tree r). */
1671
1672 static tree
1673 substitute_ssa_name (tree exp, tree f, tree r)
1674 {
1675 enum tree_code code = TREE_CODE (exp);
1676 tree op0, op1, op2, op3;
1677 tree new_tree;
1678
1679 /* We handle TREE_LIST and COMPONENT_REF separately. */
1680 if (code == TREE_LIST)
1681 {
1682 op0 = substitute_ssa_name (TREE_CHAIN (exp), f, r);
1683 op1 = substitute_ssa_name (TREE_VALUE (exp), f, r);
1684 if (op0 == TREE_CHAIN (exp) && op1 == TREE_VALUE (exp))
1685 return exp;
1686
1687 return tree_cons (TREE_PURPOSE (exp), op1, op0);
1688 }
1689 else if (code == COMPONENT_REF)
1690 {
1691 tree inner;
1692
1693 /* If this expression is getting a value from a PLACEHOLDER_EXPR
1694 and it is the right field, replace it with R. */
1695 for (inner = TREE_OPERAND (exp, 0);
1696 REFERENCE_CLASS_P (inner);
1697 inner = TREE_OPERAND (inner, 0))
1698 ;
1699
1700 /* The field. */
1701 op1 = TREE_OPERAND (exp, 1);
1702
1703 if (TREE_CODE (inner) == PLACEHOLDER_EXPR && op1 == f)
1704 return r;
1705
1706 /* If this expression hasn't been completed yet, leave it alone. */
1707 if (TREE_CODE (inner) == PLACEHOLDER_EXPR && !TREE_TYPE (inner))
1708 return exp;
1709
1710 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1711 if (op0 == TREE_OPERAND (exp, 0))
1712 return exp;
1713
1714 new_tree
1715 = fold_build3 (COMPONENT_REF, TREE_TYPE (exp), op0, op1, NULL_TREE);
1716 }
1717 else
1718 switch (TREE_CODE_CLASS (code))
1719 {
1720 case tcc_constant:
1721 return exp;
1722
1723 case tcc_declaration:
1724 if (exp == f)
1725 return r;
1726 else
1727 return exp;
1728
1729 case tcc_expression:
1730 if (exp == f)
1731 return r;
1732
1733 /* Fall through... */
1734
1735 case tcc_exceptional:
1736 case tcc_unary:
1737 case tcc_binary:
1738 case tcc_comparison:
1739 case tcc_reference:
1740 switch (TREE_CODE_LENGTH (code))
1741 {
1742 case 0:
1743 if (exp == f)
1744 return r;
1745 return exp;
1746
1747 case 1:
1748 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1749 if (op0 == TREE_OPERAND (exp, 0))
1750 return exp;
1751
1752 new_tree = fold_build1 (code, TREE_TYPE (exp), op0);
1753 break;
1754
1755 case 2:
1756 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1757 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1758
1759 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
1760 return exp;
1761
1762 new_tree = fold_build2 (code, TREE_TYPE (exp), op0, op1);
1763 break;
1764
1765 case 3:
1766 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1767 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1768 op2 = substitute_ssa_name (TREE_OPERAND (exp, 2), f, r);
1769
1770 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
1771 && op2 == TREE_OPERAND (exp, 2))
1772 return exp;
1773
1774 new_tree = fold_build3 (code, TREE_TYPE (exp), op0, op1, op2);
1775 break;
1776
1777 case 4:
1778 op0 = substitute_ssa_name (TREE_OPERAND (exp, 0), f, r);
1779 op1 = substitute_ssa_name (TREE_OPERAND (exp, 1), f, r);
1780 op2 = substitute_ssa_name (TREE_OPERAND (exp, 2), f, r);
1781 op3 = substitute_ssa_name (TREE_OPERAND (exp, 3), f, r);
1782
1783 if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
1784 && op2 == TREE_OPERAND (exp, 2)
1785 && op3 == TREE_OPERAND (exp, 3))
1786 return exp;
1787
1788 new_tree
1789 = fold (build4 (code, TREE_TYPE (exp), op0, op1, op2, op3));
1790 break;
1791
1792 default:
1793 gcc_unreachable ();
1794 }
1795 break;
1796
1797 case tcc_vl_exp:
1798 default:
1799 gcc_unreachable ();
1800 }
1801
1802 TREE_READONLY (new_tree) |= TREE_READONLY (exp);
1803
1804 if (code == INDIRECT_REF || code == ARRAY_REF || code == ARRAY_RANGE_REF)
1805 TREE_THIS_NOTRAP (new_tree) |= TREE_THIS_NOTRAP (exp);
1806
1807 return new_tree;
1808 }
1809
1810 /* Rename all the operands of NEW_EXPR by recursively visiting each operand. */
1811
1812 tree
1813 translate_isl_ast_to_gimple::rename_all_uses (tree new_expr, basic_block new_bb,
1814 basic_block old_bb)
1815 {
1816 auto_vec<tree, 2> ssa_names;
1817 collect_all_ssa_names (new_expr, &ssa_names);
1818 tree t;
1819 int i;
1820 FOR_EACH_VEC_ELT (ssa_names, i, t)
1821 if (tree r = get_rename (new_bb, t, old_bb, unknown_phi))
1822 new_expr = substitute_ssa_name (new_expr, t, r);
1823
1824 return new_expr;
1825 }
1826
1827 /* For ops that are scev_analyzable, we can regenerate a new name from the
1828 op's scalar evolution around LOOP. */
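/* A minimal sketch of the intent, with hypothetical names: if OLD_NAME has
   the scalar evolution {init_3, +, 4}_1 in the region, chrec_apply_map
   replaces the canonical induction variable of loop 1 by its entry in
   IV_MAP, say graphite_IV_7, giving the expression

     init_3 + graphite_IV_7 * 4

   which is then renamed into the new region and gimplified by
   force_gimple_operand before it substitutes OLD_NAME.  */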
1829
1830 tree
1831 translate_isl_ast_to_gimple::
1832 get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
1833 basic_block new_bb, basic_block old_bb,
1834 vec<tree> iv_map)
1835 {
1836 tree scev = scalar_evolution_in_region (region->region, loop, old_name);
1837
1838 /* At this point we should know the exact scev for each
1839 scalar SSA_NAME used in the scop. */
1840 gcc_assert (!chrec_contains_undetermined (scev));
1841
1842 tree new_expr = chrec_apply_map (scev, iv_map);
1843
1844 /* The apply should produce an expression tree containing
1845 the uses of the new induction variables. We should be
1846 able to use new_expr instead of the old_name in the newly
1847 generated loop nest. */
1848 gcc_assert (!chrec_contains_undetermined (new_expr)
1849 && !tree_contains_chrecs (new_expr, NULL));
1850
1851 if (TREE_CODE (new_expr) == SSA_NAME)
1852 {
1853 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (new_expr));
1854 if (bb && !dominated_by_p (CDI_DOMINATORS, new_bb, bb))
1855 {
1856 codegen_error = true;
1857 return build_zero_cst (TREE_TYPE (old_name));
1858 }
1859 }
1860
1861 new_expr = rename_all_uses (new_expr, new_bb, old_bb);
1862
1863 /* We check all the operands and all of them should dominate the use at
1864 new_expr. */
1865 auto_vec <tree, 2> new_ssa_names;
1866 collect_all_ssa_names (new_expr, &new_ssa_names);
1867 int i;
1868 tree new_ssa_name;
1869 FOR_EACH_VEC_ELT (new_ssa_names, i, new_ssa_name)
1870 {
1871 if (TREE_CODE (new_ssa_name) == SSA_NAME)
1872 {
1873 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (new_ssa_name));
1874 if (bb && !dominated_by_p (CDI_DOMINATORS, new_bb, bb))
1875 {
1876 codegen_error = true;
1877 return build_zero_cst (TREE_TYPE (old_name));
1878 }
1879 }
1880 }
1881
1882 /* Replace the old_name with the new_expr. */
1883 return force_gimple_operand (unshare_expr (new_expr), stmts,
1884 true, NULL_TREE);
1885 }
1886
1887 /* Renames the scalar uses of the statement COPY, inserting the
1888 gimplification code at GSI_TGT. OLD_BB is the basic block the
1889 original statement comes from, LOOP the loop the copied statement
1890 belongs to, and IV_MAP the induction variable renaming map.
1891 Returns true when something has been renamed. */
1892
1893 bool
1894 translate_isl_ast_to_gimple::rename_uses (gimple *copy,
1895 gimple_stmt_iterator *gsi_tgt,
1896 basic_block old_bb,
1897 loop_p loop, vec<tree> iv_map)
1898 {
1899 bool changed = false;
1900
1901 if (is_gimple_debug (copy))
1902 {
1903 if (gimple_debug_bind_p (copy))
1904 gimple_debug_bind_reset_value (copy);
1905 else if (gimple_debug_source_bind_p (copy))
1906 return false;
1907 else
1908 gcc_unreachable ();
1909
1910 return false;
1911 }
1912
1913 if (dump_file)
1914 {
1915 fprintf (dump_file, "[codegen] renaming uses of stmt: ");
1916 print_gimple_stmt (dump_file, copy, 0, 0);
1917 }
1918
1919 use_operand_p use_p;
1920 ssa_op_iter op_iter;
1921 FOR_EACH_SSA_USE_OPERAND (use_p, copy, op_iter, SSA_OP_USE)
1922 {
1923 tree old_name = USE_FROM_PTR (use_p);
1924
1925 if (dump_file)
1926 {
1927 fprintf (dump_file, "[codegen] renaming old_name = ");
1928 print_generic_expr (dump_file, old_name, 0);
1929 fprintf (dump_file, "\n");
1930 }
1931
1932 if (TREE_CODE (old_name) != SSA_NAME
1933 || SSA_NAME_IS_DEFAULT_DEF (old_name))
1934 continue;
1935
1936 changed = true;
1937 tree new_expr = get_rename (gsi_tgt->bb, old_name,
1938 old_bb, unknown_phi);
1939
1940 if (new_expr)
1941 {
1942 tree type_old_name = TREE_TYPE (old_name);
1943 tree type_new_expr = TREE_TYPE (new_expr);
1944
1945 if (dump_file)
1946 {
1947 fprintf (dump_file, "[codegen] from rename_map: new_name = ");
1948 print_generic_expr (dump_file, new_expr, 0);
1949 fprintf (dump_file, "\n");
1950 }
1951
1952 if (type_old_name != type_new_expr
1953 || TREE_CODE (new_expr) != SSA_NAME)
1954 {
1955 tree var = create_tmp_var (type_old_name, "var");
1956
1957 if (!useless_type_conversion_p (type_old_name, type_new_expr))
1958 new_expr = fold_convert (type_old_name, new_expr);
1959
1960 gimple_seq stmts;
1961 new_expr = force_gimple_operand (new_expr, &stmts, true, var);
1962 gsi_insert_earliest (stmts);
1963 }
1964
1965 replace_exp (use_p, new_expr);
1966 continue;
1967 }
1968
1969 gimple_seq stmts;
1970 new_expr = get_rename_from_scev (old_name, &stmts, loop, gimple_bb (copy),
1971 old_bb, iv_map);
1972 if (!new_expr || codegen_error_p ())
1973 return false;
1974
1975 if (dump_file)
1976 {
1977 fprintf (dump_file, "[codegen] not in rename map, scev: ");
1978 print_generic_expr (dump_file, new_expr, 0);
1979 fprintf (dump_file, "\n");
1980 }
1981
1982 gsi_insert_earliest (stmts);
1983 replace_exp (use_p, new_expr);
1984
1985 if (TREE_CODE (new_expr) == INTEGER_CST
1986 && is_gimple_assign (copy))
1987 {
1988 tree rhs = gimple_assign_rhs1 (copy);
1989
1990 if (TREE_CODE (rhs) == ADDR_EXPR)
1991 recompute_tree_invariant_for_addr_expr (rhs);
1992 }
1993
1994 set_rename (old_name, new_expr);
1995 }
1996
1997 return changed;
1998 }
1999
2000 /* Returns a basic block that could correspond to where a constant was defined
2001 in the original code. In the original code OLD_BB had the definition; we
2002 need to find which of the copies of OLD_BB in the new region the
2003 definition should correspond to if it has to reach BB. */
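/* As an illustration (hypothetical CFG): if OLD_BB was copied twice, say by
   peeling, into B1 and B2 with

     B1 -> ... -> B2 -> ... -> BB

   then both copies dominate BB and the nearest dominator B2 is returned: a
   constant "defined" in B2 is the definition that reaches BB.  Copies that
   sit in unrelated branches and do not dominate BB are skipped.  */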
2004
2005 basic_block
2006 translate_isl_ast_to_gimple::get_def_bb_for_const (basic_block bb,
2007 basic_block old_bb) const
2008 {
2009 vec <basic_block> *bbs = region->copied_bb_map->get (old_bb);
2010
2011 if (!bbs || bbs->is_empty ())
2012 return NULL;
2013
2014 if (1 == bbs->length ())
2015 return (*bbs)[0];
2016
2017 int i;
2018 basic_block b1 = NULL, b2;
2019 FOR_EACH_VEC_ELT (*bbs, i, b2)
2020 {
2021 if (b2 == bb)
2022 return bb;
2023
2024 /* BB and B2 are in two unrelated if-clauses. */
2025 if (!dominated_by_p (CDI_DOMINATORS, bb, b2))
2026 continue;
2027
2028 /* Compute the nearest dominator. */
2029 if (!b1 || dominated_by_p (CDI_DOMINATORS, b2, b1))
2030 b1 = b2;
2031 }
2032
2033 gcc_assert (b1);
2034 return b1;
2035 }
2036
2037 /* Get the new name of OP (from OLD_BB) to be used in NEW_BB. PHI_KIND
2038 determines the kind of phi node. */
2039
2040 tree
2041 translate_isl_ast_to_gimple::
2042 get_new_name (basic_block new_bb, tree op,
2043 basic_block old_bb, phi_node_kind phi_kind) const
2044 {
2045 /* For constants the names are the same. */
2046 if (is_constant (op))
2047 return op;
2048
2049 return get_rename (new_bb, op, old_bb, phi_kind);
2050 }
2051
2052 /* Return a debug location for OP. */
2053
2054 static location_t
2055 get_loc (tree op)
2056 {
2057 location_t loc = UNKNOWN_LOCATION;
2058
2059 if (TREE_CODE (op) == SSA_NAME)
2060 loc = gimple_location (SSA_NAME_DEF_STMT (op));
2061 return loc;
2062 }
2063
2064 /* Returns the incoming edges of basic_block BB as a pair. The first edge is
2065 the init edge (coming from outside the loop) and the second one is the back
2066 edge coming from within the same loop. */
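/* For example, for a loop header BB (hypothetical block numbers):

     bb_3 (preheader) -> bb_4 (header) <- bb_5 (latch)

   bb_3 belongs to the enclosing loop, so the edge bb_3->bb_4 becomes .first
   (the init edge), while bb_5 is in the same loop as the header, so the edge
   bb_5->bb_4 becomes .second (the back edge).  */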
2067
2068 std::pair<edge, edge>
2069 get_edges (basic_block bb)
2070 {
2071 std::pair<edge, edge> edges;
2072 edge e;
2073 edge_iterator ei;
2074 FOR_EACH_EDGE (e, ei, bb->preds)
2075 if (bb->loop_father != e->src->loop_father)
2076 edges.first = e;
2077 else
2078 edges.second = e;
2079 return edges;
2080 }
2081
2082 /* Copy the PHI arguments from OLD_PHI to the NEW_PHI. The arguments to NEW_PHI
2083 must be found unless they can be POSTPONEd for later. */
2084
2085 bool
2086 translate_isl_ast_to_gimple::
2087 copy_loop_phi_args (gphi *old_phi, init_back_edge_pair_t &ibp_old_bb,
2088 gphi *new_phi, init_back_edge_pair_t &ibp_new_bb,
2089 bool postpone)
2090 {
2091 gcc_assert (gimple_phi_num_args (old_phi) == gimple_phi_num_args (new_phi));
2092
2093 basic_block new_bb = gimple_bb (new_phi);
2094 for (unsigned i = 0; i < gimple_phi_num_args (old_phi); i++)
2095 {
2096 edge e;
2097 if (gimple_phi_arg_edge (old_phi, i) == ibp_old_bb.first)
2098 e = ibp_new_bb.first;
2099 else
2100 e = ibp_new_bb.second;
2101
2102 tree old_name = gimple_phi_arg_def (old_phi, i);
2103 tree new_name = get_new_name (new_bb, old_name,
2104 gimple_bb (old_phi), loop_phi);
2105 if (new_name)
2106 {
2107 add_phi_arg (new_phi, new_name, e, get_loc (old_name));
2108 continue;
2109 }
2110
2111 gimple *old_def_stmt = SSA_NAME_DEF_STMT (old_name);
2112 if (!old_def_stmt || gimple_code (old_def_stmt) == GIMPLE_NOP)
2113 /* If the phi arg was a function arg, or wasn't defined, just use the
2114 old name. */
2115 add_phi_arg (new_phi, old_name, e, get_loc (old_name));
2116 else if (postpone)
2117 {
2118 /* Postpone code generation for those back edges for which we don't
2119 have the names yet. */
2120 region->incomplete_phis.safe_push (std::make_pair (old_phi, new_phi));
2121 if (dump_file)
2122 fprintf (dump_file, "[codegen] postpone loop phi nodes.\n");
2123 }
2124 else
2125 /* Either we should add the arg to the phi, or we should postpone. */
2126 return false;
2127 }
2128 return true;
2129 }
2130
2131 /* Copy loop phi nodes from BB to NEW_BB. */
2132
2133 bool
2134 translate_isl_ast_to_gimple::copy_loop_phi_nodes (basic_block bb,
2135 basic_block new_bb)
2136 {
2137 if (dump_file)
2138 fprintf (dump_file, "[codegen] copying loop phi nodes in bb_%d.\n",
2139 new_bb->index);
2140
2141 /* Loop phi nodes should have only two arguments. */
2142 gcc_assert (2 == EDGE_COUNT (bb->preds));
2143
2144 /* First edge is the init edge and second is the back edge. */
2145 init_back_edge_pair_t ibp_old_bb = get_edges (bb);
2146
2147 /* First edge is the init edge and second is the back edge. */
2148 init_back_edge_pair_t ibp_new_bb = get_edges (new_bb);
2149
2150 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2151 gsi_next (&psi))
2152 {
2153 gphi *phi = psi.phi ();
2154 tree res = gimple_phi_result (phi);
2155 if (virtual_operand_p (res))
2156 continue;
2157 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2158 continue;
2159
2160 gphi *new_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2161 tree new_res = create_new_def_for (res, new_phi,
2162 gimple_phi_result_ptr (new_phi));
2163 set_rename (res, new_res);
2164 codegen_error = !copy_loop_phi_args (phi, ibp_old_bb, new_phi,
2165 ibp_new_bb, true);
2166 update_stmt (new_phi);
2167
2168 if (dump_file)
2169 {
2170 fprintf (dump_file, "[codegen] creating loop-phi node: ");
2171 print_gimple_stmt (dump_file, new_phi, 0, 0);
2172 }
2173 }
2174
2175 return true;
2176 }
2177
2178 /* Return the init value of PHI, the value coming from outside the loop. */
2179
2180 static tree
2181 get_loop_init_value (gphi *phi)
2182 {
2183
2184 loop_p loop = gimple_bb (phi)->loop_father;
2185
2186 edge e;
2187 edge_iterator ei;
2188 FOR_EACH_EDGE (e, ei, gimple_bb (phi)->preds)
2189 if (e->src->loop_father != loop)
2190 return gimple_phi_arg_def (phi, e->dest_idx);
2191
2192 return NULL_TREE;
2193 }
2194
2195 /* Find the init value (the value coming from outside the loop) of one of
2196 the operands of DEF that is defined by a loop phi. */
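/* For example (hypothetical SSA names), given the use-def chain

     sum_11 = PHI <0 (preheader), sum_9 (latch)>
     sum_9  = sum_11 + a_7;

   starting from the definition of sum_9, the walk reaches the loop phi
   defining sum_11 and returns its init value 0, i.e. the value live on
   entry to the loop.  */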
2197
2198 static tree
2199 find_init_value (gimple *def)
2200 {
2201 if (gimple_code (def) == GIMPLE_PHI)
2202 return get_loop_init_value (as_a <gphi*> (def));
2203
2204 if (gimple_vuse (def))
2205 return NULL_TREE;
2206
2207 ssa_op_iter iter;
2208 use_operand_p use_p;
2209 FOR_EACH_SSA_USE_OPERAND (use_p, def, iter, SSA_OP_USE)
2210 {
2211 tree use = USE_FROM_PTR (use_p);
2212 if (TREE_CODE (use) == SSA_NAME)
2213 {
2214 if (tree res = find_init_value (SSA_NAME_DEF_STMT (use)))
2215 return res;
2216 }
2217 }
2218
2219 return NULL_TREE;
2220 }
2221
2222 /* Return the init value, the value coming from outside the loop. */
2223
2224 static tree
2225 find_init_value_close_phi (gphi *phi)
2226 {
2227 gcc_assert (gimple_phi_num_args (phi) == 1);
2228 tree use_arg = gimple_phi_arg_def (phi, 0);
2229 gimple *def = SSA_NAME_DEF_STMT (use_arg);
2230 return find_init_value (def);
2231 }
2232
2233 /* Add a loop-close phi node for LAST_MERGE_NAME at the single exit of each
   loop of the codegen region that encloses its definition, stopping at the
   merge edge LAST_E, and return the result of the last close phi created.
   OLD_CLOSE_PHI is the original close phi whose result is renamed. */
2234 tree translate_isl_ast_to_gimple::
2235 add_close_phis_to_outer_loops (tree last_merge_name, edge last_e,
2236 gimple *old_close_phi)
2237 {
2238 sese_l &codegen_region = region->if_region->true_region->region;
2239 gimple *stmt = SSA_NAME_DEF_STMT (last_merge_name);
2240 basic_block bb = gimple_bb (stmt);
2241 if (!bb_in_sese_p (bb, codegen_region))
2242 return last_merge_name;
2243
2244 loop_p loop = bb->loop_father;
2245 if (!loop_in_sese_p (loop, codegen_region))
2246 return last_merge_name;
2247
2248 edge e = single_exit (loop);
2249
2250 if (dominated_by_p (CDI_DOMINATORS, e->dest, last_e->src))
2251 return last_merge_name;
2252
2253 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2254 tree old_close_phi_name = gimple_phi_result (old_close_phi);
2255
2256 bb = e->dest;
2257 if (!bb_contains_loop_close_phi_nodes (bb) || !single_succ_p (bb))
2258 bb = split_edge (e);
2259
2260 gphi *close_phi = create_phi_node (SSA_NAME_VAR (last_merge_name), bb);
2261 tree res = create_new_def_for (last_merge_name, close_phi,
2262 gimple_phi_result_ptr (close_phi));
2263 set_rename (old_close_phi_name, res);
2264 add_phi_arg (close_phi, last_merge_name, e, get_loc (old_name));
2265 last_merge_name = res;
2266
2267 return add_close_phis_to_outer_loops (last_merge_name, last_e, old_close_phi);
2268 }
2269
2270 /* Add phi nodes to all merge points of all the diamonds enclosing the loop of
2271 the close phi node OLD_CLOSE_PHI. */
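/* A sketch of the shape this handles (hypothetical names): the generated
   loop is wrapped in a guard, and the value computed by the loop has to be
   merged with the value the scalar had when the guarded loop was skipped:

     guard_bb -> loop ... -> merge_bb
     guard_bb ------------> merge_bb

   At merge_bb a guard-phi of the form

     x_m = PHI <x_close (coming from the loop), init (loop skipped)>

   is created for the close phi result, and x_m is recorded as the new
   rename of the original close-phi name.  */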
2272
2273 bool translate_isl_ast_to_gimple::
2274 add_close_phis_to_merge_points (gphi *old_close_phi, gphi *new_close_phi,
2275 tree default_value)
2276 {
2277 sese_l &codegen_region = region->if_region->true_region->region;
2278 basic_block default_value_bb = get_entry_bb (codegen_region);
2279 if (SSA_NAME == TREE_CODE (default_value))
2280 {
2281 gimple *stmt = SSA_NAME_DEF_STMT (default_value);
2282 if (!stmt || gimple_code (stmt) == GIMPLE_NOP)
2283 return false;
2284 default_value_bb = gimple_bb (stmt);
2285 }
2286
2287 basic_block new_close_phi_bb = gimple_bb (new_close_phi);
2288
2289 tree old_close_phi_name = gimple_phi_result (old_close_phi);
2290 tree new_close_phi_name = gimple_phi_result (new_close_phi);
2291 tree last_merge_name = new_close_phi_name;
2292 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2293
2294 int i;
2295 edge merge_e;
2296 FOR_EACH_VEC_ELT_REVERSE (merge_points, i, merge_e)
2297 {
2298 basic_block new_merge_bb = merge_e->src;
2299 if (!dominated_by_p (CDI_DOMINATORS, new_merge_bb, default_value_bb))
2300 continue;
2301
2302 last_merge_name = add_close_phis_to_outer_loops (last_merge_name, merge_e,
2303 old_close_phi);
2304
2305 gphi *merge_phi = create_phi_node (SSA_NAME_VAR (old_close_phi_name), new_merge_bb);
2306 tree merge_res = create_new_def_for (old_close_phi_name, merge_phi,
2307 gimple_phi_result_ptr (merge_phi));
2308 set_rename (old_close_phi_name, merge_res);
2309
2310 edge from_loop = NULL, from_default_value = NULL;
2311 edge e;
2312 edge_iterator ei;
2313 FOR_EACH_EDGE (e, ei, new_merge_bb->preds)
2314 if (dominated_by_p (CDI_DOMINATORS, e->src, new_close_phi_bb))
2315 from_loop = e;
2316 else
2317 from_default_value = e;
2318
2319 /* Because CDI_POST_DOMINATORS are not updated, we only rely on
2320 CDI_DOMINATORS, which may not handle all cases where new_close_phi_bb
2321 is contained in another condition. */
2322 if (!from_default_value || !from_loop)
2323 return false;
2324
2325 add_phi_arg (merge_phi, last_merge_name, from_loop, get_loc (old_name));
2326 add_phi_arg (merge_phi, default_value, from_default_value, get_loc (old_name));
2327
2328 if (dump_file)
2329 {
2330 fprintf (dump_file, "[codegen] Adding guard-phi: ");
2331 print_gimple_stmt (dump_file, merge_phi, 0, 0);
2332 }
2333
2334 update_stmt (merge_phi);
2335 last_merge_name = merge_res;
2336 }
2337
2338 return true;
2339 }
2340
2341 /* Copy all the loop-close phi args from BB to NEW_BB. */
2342
2343 bool
2344 translate_isl_ast_to_gimple::copy_loop_close_phi_args (basic_block old_bb,
2345 basic_block new_bb,
2346 bool postpone)
2347 {
2348 for (gphi_iterator psi = gsi_start_phis (old_bb); !gsi_end_p (psi);
2349 gsi_next (&psi))
2350 {
2351 gphi *old_close_phi = psi.phi ();
2352 tree res = gimple_phi_result (old_close_phi);
2353 if (virtual_operand_p (res))
2354 continue;
2355
2356 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2357 /* Loop close phi nodes should not be scev_analyzable_p. */
2358 gcc_unreachable ();
2359
2360 gphi *new_close_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2361 tree new_res = create_new_def_for (res, new_close_phi,
2362 gimple_phi_result_ptr (new_close_phi));
2363 set_rename (res, new_res);
2364
2365 tree old_name = gimple_phi_arg_def (old_close_phi, 0);
2366 tree new_name = get_new_name (new_bb, old_name, old_bb, close_phi);
2367
2368 /* Predecessor basic blocks of a loop close phi should have been code
2369 generated before. FIXME: This is fixable by merging PHIs from inner
2370 loops as well. See: gfortran.dg/graphite/interchange-3.f90. */
2371 if (!new_name)
2372 return false;
2373
2374 add_phi_arg (new_close_phi, new_name, single_pred_edge (new_bb),
2375 get_loc (old_name));
2376 if (dump_file)
2377 {
2378 fprintf (dump_file, "[codegen] Adding loop close phi: ");
2379 print_gimple_stmt (dump_file, new_close_phi, 0, 0);
2380 }
2381
2382 update_stmt (new_close_phi);
2383
2384 /* When there is no loop guard around this codegenerated loop, there is no
2385 need to collect the close-phi arg. */
2386 if (merge_points.is_empty ())
2387 continue;
2388
2389 /* Add a PHI in the succ_new_bb for each close phi of the loop. */
2390 tree default_value = find_init_value_close_phi (new_close_phi);
2391
2392 /* A close phi must come from a loop-phi having a default value. */
2393 if (!default_value)
2394 {
2395 if (!postpone)
2396 return false;
2397
2398 region->incomplete_phis.safe_push (std::make_pair (old_close_phi,
2399 new_close_phi));
2400 if (dump_file)
2401 {
2402 fprintf (dump_file, "[codegen] postpone close phi nodes: ");
2403 print_gimple_stmt (dump_file, new_close_phi, 0, 0);
2404 }
2405 continue;
2406 }
2407
2408 if (!add_close_phis_to_merge_points (old_close_phi, new_close_phi,
2409 default_value))
2410 return false;
2411 }
2412
2413 return true;
2414 }
2415
2416 /* Copy loop close phi nodes from BB to NEW_BB. */
2417
2418 bool
2419 translate_isl_ast_to_gimple::copy_loop_close_phi_nodes (basic_block old_bb,
2420 basic_block new_bb)
2421 {
2422 if (dump_file)
2423 fprintf (dump_file, "[codegen] copying loop close phi nodes in bb_%d.\n",
2424 new_bb->index);
2425 /* Loop close phi nodes should have only one argument. */
2426 gcc_assert (1 == EDGE_COUNT (old_bb->preds));
2427
2428 return copy_loop_close_phi_args (old_bb, new_bb, true);
2429 }
2430
2431
2432 /* Add the arguments OLD_PHI_ARGS and NEW_PHI_ARGS of the cond phi PHI to
2433 NEW_PHI, which is in NEW_BB. OLD_BB_DOMINATING_EDGE comes from the
2434 predecessor of OLD_BB that also dominates the other pred of OLD_BB; it is
2435 NULL when no such basic block exists. OLD_BB_NON_DOMINATING_EDGE comes
2436 from a pred that does not dominate OLD_BB; it cannot be NULL.
2437
2438 Case1: OLD_BB->preds {BB1, BB2} and BB1 does not dominate BB2 and vice versa.
2439 In this case OLD_BB_DOMINATING_EDGE == NULL.
2440
2441 Case2: OLD_BB->preds {BB1, BB2} and BB1 dominates BB2.
2442
2443 Returns true on successful copy of the args, false otherwise. */
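/* The two cases above, sketched with hypothetical blocks:

   Case1 (diamond):

     cond_bb -> BB1 -> OLD_BB
     cond_bb -> BB2 -> OLD_BB

   Neither BB1 nor BB2 dominates OLD_BB, so OLD_BB_DOMINATING_EDGE is NULL
   and both incoming edges are non-dominating.

   Case2:

     BB1 -> BB2 -> OLD_BB
     BB1 ---------> OLD_BB

   BB1 dominates both BB2 and OLD_BB, so BB1->OLD_BB is the dominating edge
   and BB2->OLD_BB is the non-dominating one.  */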
2444
2445 bool
2446 translate_isl_ast_to_gimple::
2447 add_phi_arg_for_new_expr (tree old_phi_args[2], tree new_phi_args[2],
2448 edge old_bb_dominating_edge,
2449 edge old_bb_non_dominating_edge,
2450 gphi *phi, gphi *new_phi,
2451 basic_block new_bb)
2452 {
2453 basic_block def_pred[2] = { NULL, NULL };
2454 int not_found_bb_index = -1;
2455 for (int i = 0; i < 2; i++)
2456 {
2457 /* If the corresponding def_bb could not be found the entry will be
2458 NULL. */
2459 if (TREE_CODE (old_phi_args[i]) == INTEGER_CST)
2460 def_pred[i] = get_def_bb_for_const (new_bb,
2461 gimple_phi_arg_edge (phi, i)->src);
2462 else if (new_phi_args[i] && (TREE_CODE (new_phi_args[i]) == SSA_NAME))
2463 def_pred[i] = gimple_bb (SSA_NAME_DEF_STMT (new_phi_args[i]));
2464
2465 if (!def_pred[i])
2466 {
2467 /* When none are available, bail out. */
2468 if (not_found_bb_index != -1)
2469 return false;
2470 not_found_bb_index = i;
2471 }
2472 }
2473
2474 /* Here we are pattern matching on the structure of CFG w.r.t. old one. */
2475 if (old_bb_dominating_edge)
2476 {
2477 if (not_found_bb_index != -1)
2478 return false;
2479
2480 basic_block new_pred1 = (*new_bb->preds)[0]->src;
2481 basic_block new_pred2 = (*new_bb->preds)[1]->src;
2482 vec <basic_block> *bbs
2483 = region->copied_bb_map->get (old_bb_non_dominating_edge->src);
2484
2485 /* Could not find a mapping. */
2486 if (!bbs)
2487 return false;
2488
2489 basic_block new_pred = NULL;
2490 basic_block b;
2491 int i;
2492 FOR_EACH_VEC_ELT (*bbs, i, b)
2493 {
2494 if (dominated_by_p (CDI_DOMINATORS, new_pred1, b))
2495 {
2496 /* FIXME: If we have already found new_pred then we have to
2497 disambiguate, bail out for now. */
2498 if (new_pred)
2499 return false;
2500 new_pred = new_pred1;
2501 }
2502 if (dominated_by_p (CDI_DOMINATORS, new_pred2, b))
2503 {
2504 /* FIXME: If we have already found new_pred then either it has to
2505 dominate both preds or we have to disambiguate; bail out for now. */
2506 if (new_pred)
2507 return false;
2508 new_pred = new_pred2;
2509 }
2510 }
2511
2512 if (!new_pred)
2513 return false;
2514
2515 edge new_non_dominating_edge = find_edge (new_pred, new_bb);
2516 gcc_assert (new_non_dominating_edge);
2517 /* FIXME: Validate each arg just like in loop-phis. */
2518 /* By the process of elimination we first insert the phi arg for the
2519 non-dominating pred which is computed above and then we insert the
2520 remaining one. */
2521 int inserted_edge = 0;
2522 for (; inserted_edge < 2; inserted_edge++)
2523 {
2524 edge new_bb_pred_edge = gimple_phi_arg_edge (new_phi, inserted_edge);
2525 if (new_non_dominating_edge == new_bb_pred_edge)
2526 {
2527 add_phi_arg (new_phi, new_phi_args[inserted_edge],
2528 new_non_dominating_edge,
2529 get_loc (old_phi_args[inserted_edge]));
2530 break;
2531 }
2532 }
2533 if (inserted_edge == 2)
2534 return false;
2535
2536 int edge_dominating = inserted_edge == 0 ? 1 : 0;
2537
2538 edge new_dominating_edge = NULL;
2539 for (inserted_edge = 0; inserted_edge < 2; inserted_edge++)
2540 {
2541 edge e = gimple_phi_arg_edge (new_phi, inserted_edge);
2542 if (e != new_non_dominating_edge)
2543 {
2544 new_dominating_edge = e;
2545 add_phi_arg (new_phi, new_phi_args[edge_dominating],
2546 new_dominating_edge,
2547 get_loc (old_phi_args[inserted_edge]));
2548 break;
2549 }
2550 }
2551 gcc_assert (new_dominating_edge);
2552 }
2553 else
2554 {
2555 /* Classic diamond structure: both edges are non-dominating. We need to
2556 find one unique edge; then the other can be found by elimination. If
2557 any definition (def_pred) dominates both the preds of new_bb then we
2558 bail out. Entries of def_pred may be NULL; in that case we must
2559 uniquely find the pred with the help of only one entry. */
2560 edge new_e[2] = { NULL, NULL };
2561 for (int i = 0; i < 2; i++)
2562 {
2563 edge e;
2564 edge_iterator ei;
2565 FOR_EACH_EDGE (e, ei, new_bb->preds)
2566 if (def_pred[i]
2567 && dominated_by_p (CDI_DOMINATORS, e->src, def_pred[i]))
2568 {
2569 if (new_e[i])
2570 /* We do not know how to handle the case when def_pred
2571 dominates more than one predecessor. */
2572 return false;
2573 new_e[i] = e;
2574 }
2575 }
2576
2577 gcc_assert (new_e[0] || new_e[1]);
2578
2579 /* Find the other edge by process of elimination. */
2580 if (not_found_bb_index != -1)
2581 {
2582 gcc_assert (!new_e[not_found_bb_index]);
2583 int found_bb_index = not_found_bb_index == 1 ? 0 : 1;
2584 edge e;
2585 edge_iterator ei;
2586 FOR_EACH_EDGE (e, ei, new_bb->preds)
2587 {
2588 if (new_e[found_bb_index] == e)
2589 continue;
2590 new_e[not_found_bb_index] = e;
2591 }
2592 }
2593
2594 /* Add edges to phi args. */
2595 for (int i = 0; i < 2; i++)
2596 add_phi_arg (new_phi, new_phi_args[i], new_e[i],
2597 get_loc (old_phi_args[i]));
2598 }
2599
2600 return true;
2601 }
2602
2603 /* Copy the arguments of cond-phi node PHI to NEW_PHI in the codegenerated
2604 region. If POSTPONE is true and it isn't possible to copy any arg of PHI,
2605 the PHI is added to REGION->INCOMPLETE_PHIS to be codegenerated later.
2606 Returns false if the copying was unsuccessful. */
2607
2608 bool
2609 translate_isl_ast_to_gimple::copy_cond_phi_args (gphi *phi, gphi *new_phi,
2610 vec<tree> iv_map,
2611 bool postpone)
2612 {
2613 if (dump_file)
2614 fprintf (dump_file, "[codegen] copying cond phi args.\n");
2615 gcc_assert (2 == gimple_phi_num_args (phi));
2616
2617 basic_block new_bb = gimple_bb (new_phi);
2618 loop_p loop = gimple_bb (phi)->loop_father;
2619
2620 basic_block old_bb = gimple_bb (phi);
2621 edge old_bb_non_dominating_edge = NULL, old_bb_dominating_edge = NULL;
2622
2623 edge e;
2624 edge_iterator ei;
2625 FOR_EACH_EDGE (e, ei, old_bb->preds)
2626 if (!dominated_by_p (CDI_DOMINATORS, old_bb, e->src))
2627 old_bb_non_dominating_edge = e;
2628 else
2629 old_bb_dominating_edge = e;
2630
2631 gcc_assert (!dominated_by_p (CDI_DOMINATORS, old_bb,
2632 old_bb_non_dominating_edge->src));
2633
2634 tree new_phi_args[2];
2635 tree old_phi_args[2];
2636
2637 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
2638 {
2639 tree old_name = gimple_phi_arg_def (phi, i);
2640 tree new_name = get_new_name (new_bb, old_name, old_bb, cond_phi);
2641 old_phi_args[i] = old_name;
2642 if (new_name)
2643 {
2644 new_phi_args [i] = new_name;
2645 continue;
2646 }
2647
2648 /* If the phi-arg was a parameter. */
2649 if (vec_find (region->params, old_name) != -1)
2650 {
2651 new_phi_args [i] = old_name;
2652 if (dump_file)
2653 {
2654 fprintf (dump_file,
2655 "[codegen] parameter argument to phi, new_expr: ");
2656 print_generic_expr (dump_file, new_phi_args[i], 0);
2657 fprintf (dump_file, "\n");
2658 }
2659 continue;
2660 }
2661
2662 gimple *old_def_stmt = SSA_NAME_DEF_STMT (old_name);
2663 if (!old_def_stmt || gimple_code (old_def_stmt) == GIMPLE_NOP)
2664 /* FIXME: If the phi arg was a function arg, or wasn't defined, just use
2665 the old name. */
2666 return false;
2667
2668 if (postpone)
2669 {
2670 /* If the phi arg is scev-analyzable, regenerate it here (first stage only). */
2671 if (is_gimple_reg (old_name)
2672 && scev_analyzable_p (old_name, region->region))
2673 {
2674 gimple_seq stmts;
2675 tree new_expr = get_rename_from_scev (old_name, &stmts, loop,
2676 new_bb, old_bb, iv_map);
2677 if (codegen_error_p ())
2678 return false;
2679
2680 gcc_assert (new_expr);
2681 if (dump_file)
2682 {
2683 fprintf (dump_file,
2684 "[codegen] scev analyzeable, new_expr: ");
2685 print_generic_expr (dump_file, new_expr, 0);
2686 fprintf (dump_file, "\n");
2687 }
2688 gsi_insert_earliest (stmts);
2689 new_phi_args [i] = new_expr;
2690 continue;
2691 }
2692
2693 /* Postpone code gen for later for back-edges. */
2694 region->incomplete_phis.safe_push (std::make_pair (phi, new_phi));
2695
2696 if (dump_file)
2697 {
2698 fprintf (dump_file, "[codegen] postpone cond phi nodes: ");
2699 print_gimple_stmt (dump_file, new_phi, 0, 0);
2700 }
2701
2702 new_phi_args [i] = NULL_TREE;
2703 continue;
2704 }
2705 else
2706 /* Either we should add the arg to the phi, or we should postpone. */
2707 return false;
2708 }
2709
2710 /* If none of the args have been determined in the first stage then wait until
2711 later. */
2712 if (postpone && !new_phi_args[0] && !new_phi_args[1])
2713 return true;
2714
2715 return add_phi_arg_for_new_expr (old_phi_args, new_phi_args,
2716 old_bb_dominating_edge,
2717 old_bb_non_dominating_edge,
2718 phi, new_phi, new_bb);
2719 }
2720
2721 /* Copy cond phi nodes from BB to NEW_BB. A cond phi node is a phi node in a
2722 basic block that has two predecessors, none of which is a back
2723 edge. */
2724
2725 bool
2726 translate_isl_ast_to_gimple::copy_cond_phi_nodes (basic_block bb,
2727 basic_block new_bb,
2728 vec<tree> iv_map)
2729 {
2730
2731 gcc_assert (!bb_contains_loop_close_phi_nodes (bb));
2732
2733 if (dump_file)
2734 fprintf (dump_file, "[codegen] copying cond phi nodes in bb_%d.\n",
2735 new_bb->index);
2736
2737 /* Cond phi nodes should have exactly two arguments. */
2738 gcc_assert (2 == EDGE_COUNT (bb->preds));
2739
2740 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2741 gsi_next (&psi))
2742 {
2743 gphi *phi = psi.phi ();
2744 tree res = gimple_phi_result (phi);
2745 if (virtual_operand_p (res))
2746 continue;
2747 if (is_gimple_reg (res) && scev_analyzable_p (res, region->region))
2748 /* Cond phi nodes should not be scev_analyzable_p. */
2749 gcc_unreachable ();
2750
2751 gphi *new_phi = create_phi_node (SSA_NAME_VAR (res), new_bb);
2752 tree new_res = create_new_def_for (res, new_phi,
2753 gimple_phi_result_ptr (new_phi));
2754 set_rename (res, new_res);
2755
2756 if (!copy_cond_phi_args (phi, new_phi, iv_map, true))
2757 return false;
2758
2759 update_stmt (new_phi);
2760 }
2761
2762 return true;
2763 }
2764
2765 /* Return true if STMT should be copied from region to the new code-generated
2766 region. LABELs, CONDITIONS, induction-variables and region parameters need
2767 not be copied. */
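/* For example (hypothetical statements inside a scop over induction
   variable i):

     i_6 = i_5 + 1;       induction variable, scev-analyzable: not copied,
                          it is regenerated from its scalar evolution
     if (i_6 > n_3) ...   GIMPLE_COND: not copied, the control flow is
                          regenerated from the isl AST
     a[i_5] = t_7;        memory access: copied into the new region  */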
2768
2769 static bool
2770 should_copy_to_new_region (gimple *stmt, sese_info_p region)
2771 {
2772 /* Do not copy labels or conditions. */
2773 if (gimple_code (stmt) == GIMPLE_LABEL
2774 || gimple_code (stmt) == GIMPLE_COND)
2775 return false;
2776
2777 tree lhs;
2778 /* Do not copy induction variables. */
2779 if (is_gimple_assign (stmt)
2780 && (lhs = gimple_assign_lhs (stmt))
2781 && TREE_CODE (lhs) == SSA_NAME
2782 && is_gimple_reg (lhs)
2783 && scev_analyzable_p (lhs, region->region))
2784 return false;
2785
2786 /* Do not copy parameters that have been generated in the header of the
2787 scop. */
2788 if (is_gimple_assign (stmt)
2789 && (lhs = gimple_assign_lhs (stmt))
2790 && TREE_CODE (lhs) == SSA_NAME
2791 && region->parameter_rename_map->get(lhs))
2792 return false;
2793
2794 return true;
2795 }
2796
2797 /* Create new names for all the definitions created by COPY and add replacement
2798 mappings for each new name. */
2799
2800 void
2801 translate_isl_ast_to_gimple::set_rename_for_each_def (gimple *stmt)
2802 {
2803 def_operand_p def_p;
2804 ssa_op_iter op_iter;
2805 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_ALL_DEFS)
2806 {
2807 tree old_name = DEF_FROM_PTR (def_p);
2808 tree new_name = create_new_def_for (old_name, stmt, def_p);
2809 set_rename (old_name, new_name);
2810 }
2811 }
2812
2813 /* Duplicates the statements of basic block BB into basic block NEW_BB
2814 and compute the new induction variables according to the IV_MAP. */
2815
2816 bool
2817 translate_isl_ast_to_gimple::graphite_copy_stmts_from_block (basic_block bb,
2818 basic_block new_bb,
2819 vec<tree> iv_map)
2820 {
2821 /* Iterator pointing to the place where the new statement(s) will be inserted. */
2822 gimple_stmt_iterator gsi_tgt = gsi_last_bb (new_bb);
2823
2824 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2825 gsi_next (&gsi))
2826 {
2827 gimple *stmt = gsi_stmt (gsi);
2828 if (!should_copy_to_new_region (stmt, region))
2829 continue;
2830
2831 /* Create a new copy of STMT and duplicate STMT's virtual
2832 operands. */
2833 gimple *copy = gimple_copy (stmt);
2834 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
2835
2836 if (dump_file)
2837 {
2838 fprintf (dump_file, "[codegen] inserting statement: ");
2839 print_gimple_stmt (dump_file, copy, 0, 0);
2840 }
2841
2842 maybe_duplicate_eh_stmt (copy, stmt);
2843 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
2844
2845 /* Create new names for each def in the copied stmt. */
2846 set_rename_for_each_def (copy);
2847
2848 loop_p loop = bb->loop_father;
2849 if (rename_uses (copy, &gsi_tgt, bb, loop, iv_map))
2850 {
2851 fold_stmt_inplace (&gsi_tgt);
2852 gcc_assert (gsi_stmt (gsi_tgt) == copy);
2853 }
2854
2855 if (codegen_error_p ())
2856 return false;
2857
2858 /* For each SSA_NAME in the parameter_rename_map rename their usage. */
2859 ssa_op_iter iter;
2860 use_operand_p use_p;
2861 if (!is_gimple_debug (copy))
2862 FOR_EACH_SSA_USE_OPERAND (use_p, copy, iter, SSA_OP_USE)
2863 {
2864 tree old_name = USE_FROM_PTR (use_p);
2865
2866 if (TREE_CODE (old_name) != SSA_NAME
2867 || SSA_NAME_IS_DEFAULT_DEF (old_name))
2868 continue;
2869
2870 tree *new_expr = region->parameter_rename_map->get (old_name);
2871 if (!new_expr)
2872 continue;
2873
2874 replace_exp (use_p, *new_expr);
2875 }
2876
2877 update_stmt (copy);
2878 }
2879
2880 return true;
2881 }
2882
2883
2884 /* Given a basic block BB containing close-phi nodes, return the edge leading to
2885 the new basic block where a copy of the close-phi nodes should be inserted;
2886 return NULL unless all the uses in the close phis come from a single loop. */
2887
2888 edge
2889 translate_isl_ast_to_gimple::edge_for_new_close_phis (basic_block bb)
2890 {
2891 /* Make sure that NEW_BB is the new_loop->exit->dest. We find the definition
2892 of the close phi in the original code and then find the mapping of the
2893 basic block defining that variable. If there are multiple close-phis and
2894 they are defined in different loops (in the original or in the new code,
2895 because of loop splitting) then we bail out. */
2896 loop_p new_loop = NULL;
2897 for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
2898 gsi_next (&psi))
2899 {
2900 gphi *phi = psi.phi ();
2901 tree name = gimple_phi_arg_def (phi, 0);
2902 basic_block old_loop_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
2903
2904 vec <basic_block> *bbs = region->copied_bb_map->get (old_loop_bb);
2905 if (!bbs || bbs->length () != 1)
2906 /* This is one of the places that show that preserving the original
2907 structure is not always possible, as we may need to insert a close PHI
2908 for a loop where the latch does not have any mapping, or where the
2909 mapping is ambiguous. */
2910 return NULL;
2911
2912 if (!new_loop)
2913 new_loop = (*bbs)[0]->loop_father;
2914 else if (new_loop != (*bbs)[0]->loop_father)
2915 return NULL;
2916 }
2917
2918 if (!new_loop)
2919 return NULL;
2920
2921 return single_exit (new_loop);
2922 }
2923
2924 /* Copies BB and includes in the copied BB all the statements that can
2925 be reached following the use-def chains from the memory accesses,
2926 and returns the next edge following this new block. */
2927
2928 edge
2929 translate_isl_ast_to_gimple::copy_bb_and_scalar_dependences (basic_block bb,
2930 edge next_e,
2931 vec<tree> iv_map)
2932 {
2933 int num_phis = number_of_phi_nodes (bb);
2934
2935 if (region->copied_bb_map->get (bb))
2936 {
2937 /* FIXME: we should be able to handle phi nodes with args coming from
2938 outside the region. */
2939 if (num_phis)
2940 {
2941 codegen_error = true;
2942 return NULL;
2943 }
2944 }
2945
2946 basic_block new_bb = NULL;
2947 if (bb_contains_loop_close_phi_nodes (bb))
2948 {
2949 if (dump_file)
2950 fprintf (dump_file, "[codegen] bb_%d contains close phi nodes.\n",
2951 bb->index);
2952
2953 edge e = edge_for_new_close_phis (bb);
2954 if (!e)
2955 {
2956 codegen_error = true;
2957 return NULL;
2958 }
2959
2960 basic_block phi_bb = e->dest;
2961
2962 if (!bb_contains_loop_close_phi_nodes (phi_bb) || !single_succ_p (phi_bb))
2963 phi_bb = split_edge (e);
2964
2965 gcc_assert (single_pred_edge (phi_bb)->src->loop_father
2966 != single_pred_edge (phi_bb)->dest->loop_father);
2967
2968 if (!copy_loop_close_phi_nodes (bb, phi_bb))
2969 {
2970 codegen_error = true;
2971 return NULL;
2972 }
2973
2974 if (e == next_e)
2975 new_bb = phi_bb;
2976 else
2977 new_bb = split_edge (next_e);
2978 }
2979 else
2980 {
2981 new_bb = split_edge (next_e);
2982 if (num_phis > 0 && bb_contains_loop_phi_nodes (bb))
2983 {
2984 basic_block phi_bb = next_e->dest->loop_father->header;
2985
2986 /* At this point we are unable to codegenerate while still preserving the
2987 SSA structure, because the loop may have been completely unrolled and
2988 the PHIs and cross-bb scalar dependencies are untrackable w.r.t. the
2989 original code. See gfortran.dg/graphite/pr29832.f90. */
2990 if (EDGE_COUNT (bb->preds) != EDGE_COUNT (phi_bb->preds))
2991 {
2992 codegen_error = true;
2993 return NULL;
2994 }
2995
2996 /* In case isl did some loop peeling, like this:
2997
2998 S_8(0);
2999 for (int c1 = 1; c1 <= 5; c1 += 1) {
3000 S_8(c1);
3001 }
3002 S_8(6);
3003
3004 there should be no loop-phi nodes in S_8(0).
3005
3006 FIXME: We need to reason about dynamic instances of S_8, i.e., the
3007 values of all scalar variables: for the moment we instantiate only
3008 SCEV analyzable expressions on the iteration domain, and we need to
3009 extend that to reductions that cannot be analyzed by SCEV. */
3010 if (!bb_in_sese_p (phi_bb, region->if_region->true_region->region))
3011 {
3012 codegen_error = true;
3013 return NULL;
3014 }
3015
3016 if (dump_file)
3017 fprintf (dump_file, "[codegen] bb_%d contains loop phi nodes.\n",
3018 bb->index);
3019 if (!copy_loop_phi_nodes (bb, phi_bb))
3020 {
3021 codegen_error = true;
3022 return NULL;
3023 }
3024 }
3025 else if (num_phis > 0)
3026 {
3027 if (dump_file)
3028 fprintf (dump_file, "[codegen] bb_%d contains cond phi nodes.\n",
3029 bb->index);
3030
3031 basic_block phi_bb = single_pred (new_bb);
3032 loop_p loop_father = new_bb->loop_father;
3033
3034 /* Move back until we find the block with two predecessors. */
3035 while (single_pred_p (phi_bb))
3036 phi_bb = single_pred_edge (phi_bb)->src;
3037
3038 /* If a corresponding merge-point was not found, then abort codegen. */
3039 if (phi_bb->loop_father != loop_father
3040 || !bb_in_sese_p (phi_bb, region->if_region->true_region->region)
3041 || !copy_cond_phi_nodes (bb, phi_bb, iv_map))
3042 {
3043 codegen_error = true;
3044 return NULL;
3045 }
3046 }
3047 }
3048
3049 if (dump_file)
3050 fprintf (dump_file, "[codegen] copying from bb_%d to bb_%d.\n",
3051 bb->index, new_bb->index);
3052
3053 vec <basic_block> *copied_bbs = region->copied_bb_map->get (bb);
3054 if (copied_bbs)
3055 copied_bbs->safe_push (new_bb);
3056 else
3057 {
3058 vec<basic_block> bbs;
3059 bbs.create (2);
3060 bbs.safe_push (new_bb);
3061 region->copied_bb_map->put (bb, bbs);
3062 }
3063
3064 if (!graphite_copy_stmts_from_block (bb, new_bb, iv_map))
3065 {
3066 codegen_error = true;
3067 return NULL;
3068 }
3069
3070 return single_succ_edge (new_bb);
3071 }
3072
3073 /* Patch the missing arguments of the phi nodes. */
3074
3075 void
3076 translate_isl_ast_to_gimple::translate_pending_phi_nodes ()
3077 {
3078 int i;
3079 phi_rename *rename;
3080 FOR_EACH_VEC_ELT (region->incomplete_phis, i, rename)
3081 {
3082 gphi *old_phi = rename->first;
3083 gphi *new_phi = rename->second;
3084 basic_block old_bb = gimple_bb (old_phi);
3085 basic_block new_bb = gimple_bb (new_phi);
3086
3087 /* First edge is the init edge and second is the back edge. */
3088 init_back_edge_pair_t ibp_old_bb = get_edges (old_bb);
3089 init_back_edge_pair_t ibp_new_bb = get_edges (new_bb);
3090
3091 if (dump_file)
3092 {
3093 fprintf (dump_file, "[codegen] translating pending old-phi: ");
3094 print_gimple_stmt (dump_file, old_phi, 0, 0);
3095 }
3096
3097 auto_vec <tree, 1> iv_map;
3098 if (bb_contains_loop_phi_nodes (new_bb))
3099 codegen_error = !copy_loop_phi_args (old_phi, ibp_old_bb, new_phi,
3100 ibp_new_bb, false);
3101 else if (bb_contains_loop_close_phi_nodes (new_bb))
3102 codegen_error = !copy_loop_close_phi_args (old_bb, new_bb, false);
3103 else
3104 codegen_error = !copy_cond_phi_args (old_phi, new_phi, iv_map, false);
3105
3106 if (dump_file)
3107 {
3108 fprintf (dump_file, "[codegen] to new-phi: ");
3109 print_gimple_stmt (dump_file, new_phi, 0, 0);
3110 }
3111 if (codegen_error_p ())
3112 return;
3113 }
3114 }
3115
3116 /* Add isl's parameter identifiers and corresponding trees to ivs_params. */
3117
3118 void
3119 translate_isl_ast_to_gimple::add_parameters_to_ivs_params (scop_p scop,
3120 ivs_params &ip)
3121 {
3122 sese_info_p region = scop->scop_info;
3123 unsigned nb_parameters = isl_set_dim (scop->param_context, isl_dim_param);
3124 gcc_assert (nb_parameters == region->params.length ());
3125 unsigned i;
3126 for (i = 0; i < nb_parameters; i++)
3127 {
3128 isl_id *tmp_id = isl_set_get_dim_id (scop->param_context,
3129 isl_dim_param, i);
3130 ip[tmp_id] = region->params[i];
3131 }
3132 }
3133
3134
3135 /* Generates an isl AST build, which specifies the constraints on the parameters. */
3136
3137 __isl_give isl_ast_build *
3138 translate_isl_ast_to_gimple::generate_isl_context (scop_p scop)
3139 {
3140 isl_set *context_isl = isl_set_params (isl_set_copy (scop->param_context));
3141 return isl_ast_build_from_context (context_isl);
3142 }
3143
3144 /* This method is executed before the construction of a for node. */
3145 __isl_give isl_id *
3146 ast_build_before_for (__isl_keep isl_ast_build *build, void *user)
3147 {
3148 isl_union_map *dependences = (isl_union_map *) user;
3149 ast_build_info *for_info = XNEW (struct ast_build_info);
3150 isl_union_map *schedule = isl_ast_build_get_schedule (build);
3151 isl_space *schedule_space = isl_ast_build_get_schedule_space (build);
3152 int dimension = isl_space_dim (schedule_space, isl_dim_out);
3153 for_info->is_parallelizable =
3154 !carries_deps (schedule, dependences, dimension);
3155 isl_union_map_free (schedule);
3156 isl_space_free (schedule_space);
3157 isl_id *id = isl_id_alloc (isl_ast_build_get_ctx (build), "", for_info);
3158 return id;
3159 }
3160
3161 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3162
3163 /* Generate isl AST from schedule of SCOP. */
3164
3165 __isl_give isl_ast_node *
3166 translate_isl_ast_to_gimple::scop_to_isl_ast (scop_p scop)
3167 {
3168 gcc_assert (scop->transformed_schedule);
3169
3170 /* Set the separate option to reduce control flow overhead. */
3171 isl_schedule *schedule = isl_schedule_map_schedule_node_bottom_up
3172 (isl_schedule_copy (scop->transformed_schedule), set_separate_option, NULL);
3173 isl_ast_build *context_isl = generate_isl_context (scop);
3174
3175 if (flag_loop_parallelize_all)
3176 {
3177 scop_get_dependences (scop);
3178 context_isl =
3179 isl_ast_build_set_before_each_for (context_isl, ast_build_before_for,
3180 scop->dependence);
3181 }
3182
3183 isl_ast_node *ast_isl = isl_ast_build_node_from_schedule
3184 (context_isl, schedule);
3185 isl_ast_build_free (context_isl);
3186 return ast_isl;
3187 }
3188
3189 #else
3190 /* Get the maximal number of schedule dimensions in the scop SCOP. */
3191
3192 int
3193 translate_isl_ast_to_gimple::get_max_schedule_dimensions (scop_p scop)
3194 {
3195 int i;
3196 poly_bb_p pbb;
3197 int schedule_dims = 0;
3198
3199 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
3200 {
3201 int pbb_schedule_dims = isl_map_dim (pbb->transformed, isl_dim_out);
3202 if (pbb_schedule_dims > schedule_dims)
3203 schedule_dims = pbb_schedule_dims;
3204 }
3205
3206 return schedule_dims;
3207 }
3208
3209 /* Extend the schedule to NB_SCHEDULE_DIMS schedule dimensions.
3210
3211 For schedules with different dimensionality, the isl AST generator cannot
3212 define an order and will just pick an arbitrary one. The solution to this
3213 problem is to extend all schedules to the maximal number of schedule
3214 dimensions (using '0's for the remaining values). */
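/* For example (in isl map notation), with NB_SCHEDULE_DIMS == 3 the
   one-dimensional schedule

     { S_1[i] -> [i] }

   is extended to

     { S_1[i] -> [i, 0, 0] }

   so that it can be ordered against the deeper schedules of the scop.  */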
3215
3216 __isl_give isl_map *
3217 translate_isl_ast_to_gimple::extend_schedule (__isl_take isl_map *schedule,
3218 int nb_schedule_dims)
3219 {
3220 int tmp_dims = isl_map_dim (schedule, isl_dim_out);
3221 schedule =
3222 isl_map_add_dims (schedule, isl_dim_out, nb_schedule_dims - tmp_dims);
3223 isl_val *zero =
3224 isl_val_int_from_si (isl_map_get_ctx (schedule), 0);
3225 int i;
3226 for (i = tmp_dims; i < nb_schedule_dims; i++)
3227 {
3228 schedule
3229 = isl_map_fix_val (schedule, isl_dim_out, i, isl_val_copy (zero));
3230 }
3231 isl_val_free (zero);
3232 return schedule;
3233 }
3234
3235 /* Generates a schedule, which specifies an order used to
3236 visit elements in a domain. */
3237
3238 __isl_give isl_union_map *
3239 translate_isl_ast_to_gimple::generate_isl_schedule (scop_p scop)
3240 {
3241 int nb_schedule_dims = get_max_schedule_dimensions (scop);
3242 int i;
3243 poly_bb_p pbb;
3244 isl_union_map *schedule_isl =
3245 isl_union_map_empty (isl_set_get_space (scop->param_context));
3246
3247 FOR_EACH_VEC_ELT (scop->pbbs, i, pbb)
3248 {
3249 /* Dead code elimination: when the domain of a PBB is empty,
3250 don't generate code for the PBB. */
3251 if (isl_set_is_empty (pbb->domain))
3252 continue;
3253
3254 isl_map *bb_schedule = isl_map_copy (pbb->transformed);
3255 bb_schedule = isl_map_intersect_domain (bb_schedule,
3256 isl_set_copy (pbb->domain));
3257 bb_schedule = extend_schedule (bb_schedule, nb_schedule_dims);
3258 bb_schedule = isl_map_coalesce (bb_schedule);
3259 schedule_isl
3260 = isl_union_map_union (schedule_isl,
3261 isl_union_map_from_map (bb_schedule));
3262 schedule_isl = isl_union_map_coalesce (schedule_isl);
3263 }
3264 return schedule_isl;
3265 }
3266
3267 /* Set the separate option for all dimensions.
3268 This helps to reduce control overhead. */
3269
3270 __isl_give isl_ast_build *
3271 translate_isl_ast_to_gimple::set_options (__isl_take isl_ast_build *control,
3272 __isl_keep isl_union_map *schedule)
3273 {
3274 isl_ctx *ctx = isl_union_map_get_ctx (schedule);
3275 isl_space *range_space = isl_space_set_alloc (ctx, 0, 1);
3276 range_space =
3277 isl_space_set_tuple_name (range_space, isl_dim_set, "separate");
3278 isl_union_set *range =
3279 isl_union_set_from_set (isl_set_universe (range_space));
3280 isl_union_set *domain = isl_union_map_range (isl_union_map_copy (schedule));
3281 domain = isl_union_set_universe (domain);
3282 isl_union_map *options = isl_union_map_from_domain_and_range (domain, range);
3283 return isl_ast_build_set_options (control, options);
3284 }
3285
3286 /* Generate the isl AST from the schedule of SCOP. Also collect the parameters of SCOP in IP. */
3287
3288 __isl_give isl_ast_node *
3289 translate_isl_ast_to_gimple::scop_to_isl_ast (scop_p scop, ivs_params &ip)
3290 {
3291 /* Generate loop upper bounds that consist of the current loop iterator, an
3292 operator (< or <=) and an expression not involving the iterator. If this
3293 option is not set, then the current loop iterator may appear several times
3294 in the upper bound. See the isl manual for more details. */
3295 isl_options_set_ast_build_atomic_upper_bound (scop->isl_context, true);
3296
3297 add_parameters_to_ivs_params (scop, ip);
3298 isl_union_map *schedule_isl = generate_isl_schedule (scop);
3299 isl_ast_build *context_isl = generate_isl_context (scop);
3300 context_isl = set_options (context_isl, schedule_isl);
3301 if (flag_loop_parallelize_all)
3302 {
3303 isl_union_map *dependence = scop_get_dependences (scop);
3304 context_isl =
3305 isl_ast_build_set_before_each_for (context_isl, ast_build_before_for,
3306 dependence);
3307 }
3308
3309 isl_ast_node *ast_isl = isl_ast_build_ast_from_schedule (context_isl,
3310 schedule_isl);
3311 if (scop->schedule)
3312 {
3313 isl_schedule_free (scop->schedule);
3314 scop->schedule = NULL;
3315 }
3316
3317 isl_ast_build_free (context_isl);
3318 return ast_isl;
3319 }
3320 #endif
3321
3322 /* Copy the def of TR from sese REGION to the newly created TO_REGION. TR is
3323 defined by DEF_STMT. GSI points to the entry basic block of TO_REGION. */
3324
3325 static void
3326 copy_def (tree tr, gimple *def_stmt, sese_info_p region, sese_info_p to_region,
3327 gimple_stmt_iterator *gsi)
3328 {
3329 if (!defined_in_sese_p (tr, region->region))
3330 return;
3331
3332 ssa_op_iter iter;
3333 use_operand_p use_p;
3334 FOR_EACH_SSA_USE_OPERAND (use_p, def_stmt, iter, SSA_OP_USE)
3335 {
3336 tree use_tr = USE_FROM_PTR (use_p);
3337
3338 /* Do not copy parameters that have been generated in the header of the
3339 scop. */
3340 if (region->parameter_rename_map->get(use_tr))
3341 continue;
3342
3343 gimple *def_of_use = SSA_NAME_DEF_STMT (use_tr);
3344 if (!def_of_use)
3345 continue;
3346
3347 copy_def (use_tr, def_of_use, region, to_region, gsi);
3348 }
3349
3350 gimple *copy = gimple_copy (def_stmt);
3351 gsi_insert_after (gsi, copy, GSI_NEW_STMT);
3352
3353 /* Create new names for all the definitions created by COPY and
3354 add replacement mappings for each new name. */
3355 def_operand_p def_p;
3356 ssa_op_iter op_iter;
3357 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
3358 {
3359 tree old_name = DEF_FROM_PTR (def_p);
3360 tree new_name = create_new_def_for (old_name, copy, def_p);
3361 region->parameter_rename_map->put(old_name, new_name);
3362 }
3363
3364 update_stmt (copy);
3365 }
3366
3367 static void
3368 copy_internal_parameters (sese_info_p region, sese_info_p to_region)
3369 {
3370 /* For all the parameters whose definition is in the if_region->false_region,
3371 insert code on true_region (if_region->true_region->entry). */
3372
3373 int i;
3374 tree tr;
3375 gimple_stmt_iterator gsi = gsi_start_bb(to_region->region.entry->dest);
3376
3377 FOR_EACH_VEC_ELT (region->params, i, tr)
3378 {
3379 /* Copy the definition of TR when it is defined inside REGION. */
3380 gimple *def_stmt = SSA_NAME_DEF_STMT (tr);
3381 if (def_stmt)
3382 copy_def (tr, def_stmt, region, to_region, &gsi);
3383 }
3384 }
3385
3386 /* GIMPLE Loop Generator: generates loops in GIMPLE form for the given
3387 SCOP. Return true if code generation succeeded.
3388
3389 FIXME: This is not yet a full implementation of the code generator
3390 with isl ASTs. Generation of GIMPLE code has to be completed. */
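/* Schematically, the code generated by this function has the shape

     if (1)   <- set_ifsese_condition resets this to 0 on codegen errors
       {
         ... code generated from the isl AST (true_region) ...
       }
     else
       {
         ... the original SESE region, kept as a fallback (false_region) ...
       }

   so a failed code generation falls back to executing the original loops.  */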
3391
3392 bool
3393 graphite_regenerate_ast_isl (scop_p scop)
3394 {
3395 sese_info_p region = scop->scop_info;
3396 translate_isl_ast_to_gimple t (region);
3397
3398 ifsese if_region = NULL;
3399 isl_ast_node *root_node;
3400 ivs_params ip;
3401
3402 timevar_push (TV_GRAPHITE_CODE_GEN);
3403 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3404 t.add_parameters_to_ivs_params (scop, ip);
3405 root_node = t.scop_to_isl_ast (scop);
3406 #else
3407 root_node = t.scop_to_isl_ast (scop, ip);
3408 #endif
3409
3410 if (dump_file && (dump_flags & TDF_DETAILS))
3411 {
3412 #ifdef HAVE_ISL_OPTIONS_SET_SCHEDULE_SERIALIZE_SCCS
3413 fprintf (dump_file, "[scheduler] original schedule:\n");
3414 print_isl_schedule (dump_file, scop->original_schedule);
3415 fprintf (dump_file, "[scheduler] isl transformed schedule:\n");
3416 print_isl_schedule (dump_file, scop->transformed_schedule);
3417
3418 fprintf (dump_file, "[scheduler] original ast:\n");
3419 print_schedule_ast (dump_file, scop->original_schedule, scop);
3420 #endif
3421 fprintf (dump_file, "[scheduler] AST generated by isl:\n");
3422 print_isl_ast (dump_file, root_node);
3423 }
3424
3425 recompute_all_dominators ();
3426 graphite_verify ();
3427
3428 if_region = move_sese_in_condition (region);
3429 region->if_region = if_region;
3430 recompute_all_dominators ();
3431
3432 loop_p context_loop = region->region.entry->src->loop_father;
3433
3434 /* Copy all the parameters which are defined in the region. */
3435 copy_internal_parameters(if_region->false_region, if_region->true_region);
3436
3437 edge e = single_succ_edge (if_region->true_region->region.entry->dest);
3438 basic_block bb = split_edge (e);
3439
3440 /* Update the true_region exit edge. */
3441 region->if_region->true_region->region.exit = single_succ_edge (bb);
3442
3443 t.translate_isl_ast (context_loop, root_node, e, ip);
3444 if (t.codegen_error_p ())
3445 {
3446 if (dump_file)
3447 fprintf (dump_file, "codegen error: "
3448 "reverting back to the original code.\n");
3449 set_ifsese_condition (if_region, integer_zero_node);
3450 }
3451 else
3452 {
3453 t.translate_pending_phi_nodes ();
3454 if (!t.codegen_error_p ())
3455 {
3456 sese_insert_phis_for_liveouts (region,
3457 if_region->region->region.exit->src,
3458 if_region->false_region->region.exit,
3459 if_region->true_region->region.exit);
3460 mark_virtual_operands_for_renaming (cfun);
3461 update_ssa (TODO_update_ssa);
3462
3463
3464 graphite_verify ();
3465 scev_reset ();
3466 recompute_all_dominators ();
3467 graphite_verify ();
3468
3469 if (dump_file)
3470 fprintf (dump_file, "[codegen] isl AST to Gimple succeeded.\n");
3471 }
3472 else
3473 {
3474 if (dump_file)
3475 fprintf (dump_file, "[codegen] unsuccessful in translating"
3476 " pending phis, reverting back to the original code.\n");
3477 set_ifsese_condition (if_region, integer_zero_node);
3478 }
3479 }
3480
3481 free (if_region->true_region);
3482 free (if_region->region);
3483 free (if_region);
3484
3485 ivs_params_clear (ip);
3486 isl_ast_node_free (root_node);
3487 timevar_pop (TV_GRAPHITE_CODE_GEN);
3488
3489 if (dump_file && (dump_flags & TDF_DETAILS))
3490 {
3491 loop_p loop;
3492 int num_no_dependency = 0;
3493
3494 FOR_EACH_LOOP (loop, 0)
3495 if (loop->can_be_parallel)
3496 num_no_dependency++;
3497
3498 fprintf (dump_file, "%d loops carried no dependency.\n",
3499 num_no_dependency);
3500 }
3501
3502 return !t.codegen_error_p ();
3503 }
3504
3505 #endif /* HAVE_isl */