gcc/tree-data-ref.c
1 /* Data references and dependences detectors.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <pop@cri.ensmp.fr>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* This pass walks a given loop structure searching for array
22 references. The information about the array accesses is recorded
23 in DATA_REFERENCE structures.
24
25 The basic test for determining the dependences is:
26 given two access functions chrec1 and chrec2 to the same array, and
27 two vectors x and y from the iteration domain, the same element of
28 the array is accessed twice at iterations x and y if and only if:
29 | chrec1 (x) == chrec2 (y).
30
31 The goals of this analysis are:
32
33 - to determine independence: the relation between two
34 independent accesses is qualified with chrec_known (this
35 information allows loop parallelization),
36
37 - when two data references access the same data, to qualify the
38 dependence relation with classic dependence representations:
39
40 - distance vectors
41 - direction vectors
42 - loop carried level dependence
43 - polyhedron dependence
44 or with the chains of recurrences based representation,
45
46 - to define a knowledge base for storing the data dependence
47 information,
48
49 - to define an interface to access this data.
50
51
52 Definitions:
53
54 - subscript: given two array accesses, a subscript is the tuple
55 composed of the access functions for a given dimension. Example:
56 Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
57 (f1, g1), (f2, g2), (f3, g3).
58
59 - Diophantine equation: an equation whose coefficients and
60 solutions are integer constants, for example the equation
61 | 3*x + 2*y = 1
62 has an integer solution x = 1 and y = -1.
63
64 References:
65
66 - "Advanced Compilation for High Performance Computing" by Randy
67 Allen and Ken Kennedy.
68 http://citeseer.ist.psu.edu/goff91practical.html
69
70 - "Loop Transformations for Restructuring Compilers - The Foundations"
71 by Utpal Banerjee.
72
73
74 */
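/* A small worked example of the basic test above: for two accesses
   A[2*i] and A[2*i + 1] in the same loop, the access functions are
   chrec1 (x) = 2*x and chrec2 (y) = 2*y + 1, so a conflict would need
   | 2*x - 2*y = 1
   a Diophantine equation whose left-hand side is always even; it has
   no integer solution, hence the two references are independent
   (chrec_known).  */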
75
76 #include "config.h"
77 #include "system.h"
78 #include "coretypes.h"
79 #include "backend.h"
80 #include "rtl.h"
81 #include "tree.h"
82 #include "gimple.h"
83 #include "gimple-pretty-print.h"
84 #include "alias.h"
85 #include "fold-const.h"
86 #include "expr.h"
87 #include "gimple-iterator.h"
88 #include "tree-ssa-loop-niter.h"
89 #include "tree-ssa-loop.h"
90 #include "tree-ssa.h"
91 #include "cfgloop.h"
92 #include "tree-data-ref.h"
93 #include "tree-scalar-evolution.h"
94 #include "dumpfile.h"
95 #include "tree-affine.h"
96 #include "params.h"
97
98 static struct datadep_stats
99 {
100 int num_dependence_tests;
101 int num_dependence_dependent;
102 int num_dependence_independent;
103 int num_dependence_undetermined;
104
105 int num_subscript_tests;
106 int num_subscript_undetermined;
107 int num_same_subscript_function;
108
109 int num_ziv;
110 int num_ziv_independent;
111 int num_ziv_dependent;
112 int num_ziv_unimplemented;
113
114 int num_siv;
115 int num_siv_independent;
116 int num_siv_dependent;
117 int num_siv_unimplemented;
118
119 int num_miv;
120 int num_miv_independent;
121 int num_miv_dependent;
122 int num_miv_unimplemented;
123 } dependence_stats;
124
125 static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
126 struct data_reference *,
127 struct data_reference *,
128 struct loop *);
129 /* Returns true iff A divides B. */
130
131 static inline bool
132 tree_fold_divides_p (const_tree a, const_tree b)
133 {
134 gcc_assert (TREE_CODE (a) == INTEGER_CST);
135 gcc_assert (TREE_CODE (b) == INTEGER_CST);
136 return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
137 }
138
139 /* Returns true iff A divides B. */
140
141 static inline bool
142 int_divides_p (int a, int b)
143 {
144 return ((b % a) == 0);
145 }
146
147 \f
148
149 /* Dump into FILE all the data references from DATAREFS. */
150
151 static void
152 dump_data_references (FILE *file, vec<data_reference_p> datarefs)
153 {
154 unsigned int i;
155 struct data_reference *dr;
156
157 FOR_EACH_VEC_ELT (datarefs, i, dr)
158 dump_data_reference (file, dr);
159 }
160
161 /* Unified debug functions: dump to stderr all the data references from REF. */
162
163 DEBUG_FUNCTION void
164 debug (vec<data_reference_p> &ref)
165 {
166 dump_data_references (stderr, ref);
167 }
168
169 DEBUG_FUNCTION void
170 debug (vec<data_reference_p> *ptr)
171 {
172 if (ptr)
173 debug (*ptr);
174 else
175 fprintf (stderr, "<nil>\n");
176 }
177
178
179 /* Dump into STDERR all the data references from DATAREFS. */
180
181 DEBUG_FUNCTION void
182 debug_data_references (vec<data_reference_p> datarefs)
183 {
184 dump_data_references (stderr, datarefs);
185 }
186
187 /* Print to STDERR the data_reference DR. */
188
189 DEBUG_FUNCTION void
190 debug_data_reference (struct data_reference *dr)
191 {
192 dump_data_reference (stderr, dr);
193 }
194
195 /* Dump function for a DATA_REFERENCE structure. */
196
197 void
198 dump_data_reference (FILE *outf,
199 struct data_reference *dr)
200 {
201 unsigned int i;
202
203 fprintf (outf, "#(Data Ref: \n");
204 fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index);
205 fprintf (outf, "# stmt: ");
206 print_gimple_stmt (outf, DR_STMT (dr), 0, 0);
207 fprintf (outf, "# ref: ");
208 print_generic_stmt (outf, DR_REF (dr), 0);
209 fprintf (outf, "# base_object: ");
210 print_generic_stmt (outf, DR_BASE_OBJECT (dr), 0);
211
212 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
213 {
214 fprintf (outf, "# Access function %d: ", i);
215 print_generic_stmt (outf, DR_ACCESS_FN (dr, i), 0);
216 }
217 fprintf (outf, "#)\n");
218 }
219
220 /* Unified dump function for a DATA_REFERENCE structure. */
221
222 DEBUG_FUNCTION void
223 debug (data_reference &ref)
224 {
225 dump_data_reference (stderr, &ref);
226 }
227
228 DEBUG_FUNCTION void
229 debug (data_reference *ptr)
230 {
231 if (ptr)
232 debug (*ptr);
233 else
234 fprintf (stderr, "<nil>\n");
235 }
236
237
238 /* Dumps the affine function described by FN to the file OUTF. */
239
240 DEBUG_FUNCTION void
241 dump_affine_function (FILE *outf, affine_fn fn)
242 {
243 unsigned i;
244 tree coef;
245
246 print_generic_expr (outf, fn[0], TDF_SLIM);
247 for (i = 1; fn.iterate (i, &coef); i++)
248 {
249 fprintf (outf, " + ");
250 print_generic_expr (outf, coef, TDF_SLIM);
251 fprintf (outf, " * x_%u", i);
252 }
253 }
254
255 /* Dumps the conflict function CF to the file OUTF. */
256
257 DEBUG_FUNCTION void
258 dump_conflict_function (FILE *outf, conflict_function *cf)
259 {
260 unsigned i;
261
262 if (cf->n == NO_DEPENDENCE)
263 fprintf (outf, "no dependence");
264 else if (cf->n == NOT_KNOWN)
265 fprintf (outf, "not known");
266 else
267 {
268 for (i = 0; i < cf->n; i++)
269 {
270 if (i != 0)
271 fprintf (outf, " ");
272 fprintf (outf, "[");
273 dump_affine_function (outf, cf->fns[i]);
274 fprintf (outf, "]");
275 }
276 }
277 }
278
279 /* Dump function for a SUBSCRIPT structure. */
280
281 DEBUG_FUNCTION void
282 dump_subscript (FILE *outf, struct subscript *subscript)
283 {
284 conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);
285
286 fprintf (outf, "\n (subscript \n");
287 fprintf (outf, " iterations_that_access_an_element_twice_in_A: ");
288 dump_conflict_function (outf, cf);
289 if (CF_NONTRIVIAL_P (cf))
290 {
291 tree last_iteration = SUB_LAST_CONFLICT (subscript);
292 fprintf (outf, "\n last_conflict: ");
293 print_generic_expr (outf, last_iteration, 0);
294 }
295
296 cf = SUB_CONFLICTS_IN_B (subscript);
297 fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: ");
298 dump_conflict_function (outf, cf);
299 if (CF_NONTRIVIAL_P (cf))
300 {
301 tree last_iteration = SUB_LAST_CONFLICT (subscript);
302 fprintf (outf, "\n last_conflict: ");
303 print_generic_expr (outf, last_iteration, 0);
304 }
305
306 fprintf (outf, "\n (Subscript distance: ");
307 print_generic_expr (outf, SUB_DISTANCE (subscript), 0);
308 fprintf (outf, " ))\n");
309 }
310
311 /* Print the classic direction vector DIRV to OUTF. */
312
313 DEBUG_FUNCTION void
314 print_direction_vector (FILE *outf,
315 lambda_vector dirv,
316 int length)
317 {
318 int eq;
319
320 for (eq = 0; eq < length; eq++)
321 {
322 enum data_dependence_direction dir = ((enum data_dependence_direction)
323 dirv[eq]);
324
325 switch (dir)
326 {
327 case dir_positive:
328 fprintf (outf, " +");
329 break;
330 case dir_negative:
331 fprintf (outf, " -");
332 break;
333 case dir_equal:
334 fprintf (outf, " =");
335 break;
336 case dir_positive_or_equal:
337 fprintf (outf, " +=");
338 break;
339 case dir_positive_or_negative:
340 fprintf (outf, " +-");
341 break;
342 case dir_negative_or_equal:
343 fprintf (outf, " -=");
344 break;
345 case dir_star:
346 fprintf (outf, " *");
347 break;
348 default:
349 fprintf (outf, "indep");
350 break;
351 }
352 }
353 fprintf (outf, "\n");
354 }
355
356 /* Print a vector of direction vectors. */
357
358 DEBUG_FUNCTION void
359 print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
360 int length)
361 {
362 unsigned j;
363 lambda_vector v;
364
365 FOR_EACH_VEC_ELT (dir_vects, j, v)
366 print_direction_vector (outf, v, length);
367 }
368
369 /* Print out a vector VEC of length N to OUTFILE. */
370
371 DEBUG_FUNCTION void
372 print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
373 {
374 int i;
375
376 for (i = 0; i < n; i++)
377 fprintf (outfile, "%3d ", vector[i]);
378 fprintf (outfile, "\n");
379 }
380
381 /* Print a vector of distance vectors. */
382
383 DEBUG_FUNCTION void
384 print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
385 int length)
386 {
387 unsigned j;
388 lambda_vector v;
389
390 FOR_EACH_VEC_ELT (dist_vects, j, v)
391 print_lambda_vector (outf, v, length);
392 }
393
394 /* Dump function for a DATA_DEPENDENCE_RELATION structure. */
395
396 DEBUG_FUNCTION void
397 dump_data_dependence_relation (FILE *outf,
398 struct data_dependence_relation *ddr)
399 {
400 struct data_reference *dra, *drb;
401
402 fprintf (outf, "(Data Dep: \n");
403
404 if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
405 {
406 if (ddr)
407 {
408 dra = DDR_A (ddr);
409 drb = DDR_B (ddr);
410 if (dra)
411 dump_data_reference (outf, dra);
412 else
413 fprintf (outf, " (nil)\n");
414 if (drb)
415 dump_data_reference (outf, drb);
416 else
417 fprintf (outf, " (nil)\n");
418 }
419 fprintf (outf, " (don't know)\n)\n");
420 return;
421 }
422
423 dra = DDR_A (ddr);
424 drb = DDR_B (ddr);
425 dump_data_reference (outf, dra);
426 dump_data_reference (outf, drb);
427
428 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
429 fprintf (outf, " (no dependence)\n");
430
431 else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
432 {
433 unsigned int i;
434 struct loop *loopi;
435
436 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
437 {
438 fprintf (outf, " access_fn_A: ");
439 print_generic_stmt (outf, DR_ACCESS_FN (dra, i), 0);
440 fprintf (outf, " access_fn_B: ");
441 print_generic_stmt (outf, DR_ACCESS_FN (drb, i), 0);
442 dump_subscript (outf, DDR_SUBSCRIPT (ddr, i));
443 }
444
445 fprintf (outf, " inner loop index: %d\n", DDR_INNER_LOOP (ddr));
446 fprintf (outf, " loop nest: (");
447 FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
448 fprintf (outf, "%d ", loopi->num);
449 fprintf (outf, ")\n");
450
451 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
452 {
453 fprintf (outf, " distance_vector: ");
454 print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
455 DDR_NB_LOOPS (ddr));
456 }
457
458 for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
459 {
460 fprintf (outf, " direction_vector: ");
461 print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
462 DDR_NB_LOOPS (ddr));
463 }
464 }
465
466 fprintf (outf, ")\n");
467 }
468
469 /* Debug version. */
470
471 DEBUG_FUNCTION void
472 debug_data_dependence_relation (struct data_dependence_relation *ddr)
473 {
474 dump_data_dependence_relation (stderr, ddr);
475 }
476
477 /* Dump into FILE all the dependence relations from DDRS. */
478
479 DEBUG_FUNCTION void
480 dump_data_dependence_relations (FILE *file,
481 vec<ddr_p> ddrs)
482 {
483 unsigned int i;
484 struct data_dependence_relation *ddr;
485
486 FOR_EACH_VEC_ELT (ddrs, i, ddr)
487 dump_data_dependence_relation (file, ddr);
488 }
489
490 DEBUG_FUNCTION void
491 debug (vec<ddr_p> &ref)
492 {
493 dump_data_dependence_relations (stderr, ref);
494 }
495
496 DEBUG_FUNCTION void
497 debug (vec<ddr_p> *ptr)
498 {
499 if (ptr)
500 debug (*ptr);
501 else
502 fprintf (stderr, "<nil>\n");
503 }
504
505
506 /* Dump to STDERR all the dependence relations from DDRS. */
507
508 DEBUG_FUNCTION void
509 debug_data_dependence_relations (vec<ddr_p> ddrs)
510 {
511 dump_data_dependence_relations (stderr, ddrs);
512 }
513
514 /* Dumps the distance and direction vectors in FILE. DDRS contains
515 the dependence relations; the size of each dependence vector is the
516 number of loops in the considered nest (DDR_NB_LOOPS). */
518
519 DEBUG_FUNCTION void
520 dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
521 {
522 unsigned int i, j;
523 struct data_dependence_relation *ddr;
524 lambda_vector v;
525
526 FOR_EACH_VEC_ELT (ddrs, i, ddr)
527 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
528 {
529 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
530 {
531 fprintf (file, "DISTANCE_V (");
532 print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
533 fprintf (file, ")\n");
534 }
535
536 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
537 {
538 fprintf (file, "DIRECTION_V (");
539 print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
540 fprintf (file, ")\n");
541 }
542 }
543
544 fprintf (file, "\n\n");
545 }
546
547 /* Dumps the data dependence relations DDRS in FILE. */
548
549 DEBUG_FUNCTION void
550 dump_ddrs (FILE *file, vec<ddr_p> ddrs)
551 {
552 unsigned int i;
553 struct data_dependence_relation *ddr;
554
555 FOR_EACH_VEC_ELT (ddrs, i, ddr)
556 dump_data_dependence_relation (file, ddr);
557
558 fprintf (file, "\n\n");
559 }
560
561 DEBUG_FUNCTION void
562 debug_ddrs (vec<ddr_p> ddrs)
563 {
564 dump_ddrs (stderr, ddrs);
565 }
566
567 /* Helper function for split_constant_offset. Expresses OP0 CODE OP1
568 (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero
569 constant of type ssizetype, and returns true. If we cannot do this
570 with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false
571 is returned. */
572
573 static bool
574 split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
575 tree *var, tree *off)
576 {
577 tree var0, var1;
578 tree off0, off1;
579 enum tree_code ocode = code;
580
581 *var = NULL_TREE;
582 *off = NULL_TREE;
583
584 switch (code)
585 {
586 case INTEGER_CST:
587 *var = build_int_cst (type, 0);
588 *off = fold_convert (ssizetype, op0);
589 return true;
590
591 case POINTER_PLUS_EXPR:
592 ocode = PLUS_EXPR;
593 /* FALLTHROUGH */
594 case PLUS_EXPR:
595 case MINUS_EXPR:
596 split_constant_offset (op0, &var0, &off0);
597 split_constant_offset (op1, &var1, &off1);
598 *var = fold_build2 (code, type, var0, var1);
599 *off = size_binop (ocode, off0, off1);
600 return true;
601
602 case MULT_EXPR:
603 if (TREE_CODE (op1) != INTEGER_CST)
604 return false;
605
606 split_constant_offset (op0, &var0, &off0);
607 *var = fold_build2 (MULT_EXPR, type, var0, op1);
608 *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
609 return true;
610
611 case ADDR_EXPR:
612 {
613 tree base, poffset;
614 HOST_WIDE_INT pbitsize, pbitpos;
615 machine_mode pmode;
616 int punsignedp, pvolatilep;
617
618 op0 = TREE_OPERAND (op0, 0);
619 base = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset,
620 &pmode, &punsignedp, &pvolatilep, false);
621
622 if (pbitpos % BITS_PER_UNIT != 0)
623 return false;
624 base = build_fold_addr_expr (base);
625 off0 = ssize_int (pbitpos / BITS_PER_UNIT);
626
627 if (poffset)
628 {
629 split_constant_offset (poffset, &poffset, &off1);
630 off0 = size_binop (PLUS_EXPR, off0, off1);
631 if (POINTER_TYPE_P (TREE_TYPE (base)))
632 base = fold_build_pointer_plus (base, poffset);
633 else
634 base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base,
635 fold_convert (TREE_TYPE (base), poffset));
636 }
637
638 var0 = fold_convert (type, base);
639
640 /* If variable length types are involved, punt, otherwise casts
641 might be converted into ARRAY_REFs in gimplify_conversion.
642 To compute that ARRAY_REF's element size, TYPE_SIZE_UNIT, which
643 possibly no longer appears in the current GIMPLE, might resurface.
644 One could perhaps run
645 if (CONVERT_EXPR_P (var0))
646 {
647 gimplify_conversion (&var0);
648 // Attempt to fill in the element size of any ARRAY_REF found
649 // within var0 from the corresponding ARRAY_REF embedded in op;
650 // if unsuccessful, just punt.
651 } */
652 while (POINTER_TYPE_P (type))
653 type = TREE_TYPE (type);
654 if (int_size_in_bytes (type) < 0)
655 return false;
656
657 *var = var0;
658 *off = off0;
659 return true;
660 }
661
662 case SSA_NAME:
663 {
664 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
665 return false;
666
667 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
668 enum tree_code subcode;
669
670 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
671 return false;
672
673 var0 = gimple_assign_rhs1 (def_stmt);
674 subcode = gimple_assign_rhs_code (def_stmt);
675 var1 = gimple_assign_rhs2 (def_stmt);
676
677 return split_constant_offset_1 (type, var0, subcode, var1, var, off);
678 }
679 CASE_CONVERT:
680 {
681 /* We must not introduce undefined overflow, and we must not change the value.
682 Hence we're okay if the inner type doesn't overflow to start with
683 (pointer or signed), the outer type also is an integer or pointer
684 and the outer precision is at least as large as the inner. */
685 tree itype = TREE_TYPE (op0);
686 if ((POINTER_TYPE_P (itype)
687 || (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_UNDEFINED (itype)))
688 && TYPE_PRECISION (type) >= TYPE_PRECISION (itype)
689 && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)))
690 {
691 split_constant_offset (op0, &var0, off);
692 *var = fold_convert (type, var0);
693 return true;
694 }
695 return false;
696 }
697
698 default:
699 return false;
700 }
701 }
702
703 /* Expresses EXP as VAR + OFF, where OFF is a constant. The type of OFF
704 will be ssizetype. */
705
706 void
707 split_constant_offset (tree exp, tree *var, tree *off)
708 {
709 tree type = TREE_TYPE (exp), otype, op0, op1, e, o;
710 enum tree_code code;
711
712 *var = exp;
713 *off = ssize_int (0);
714 STRIP_NOPS (exp);
715
716 if (tree_is_chrec (exp)
717 || get_gimple_rhs_class (TREE_CODE (exp)) == GIMPLE_TERNARY_RHS)
718 return;
719
720 otype = TREE_TYPE (exp);
721 code = TREE_CODE (exp);
722 extract_ops_from_tree (exp, &code, &op0, &op1);
723 if (split_constant_offset_1 (otype, op0, code, op1, &e, &o))
724 {
725 *var = fold_convert (type, e);
726 *off = o;
727 }
728 }
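/* An illustrative sketch (assuming a declared array A of 4-byte
   elements and an SSA index I): for EXP = &A[I + 4] the recursion
   above peels the constant part of the address computation into OFF,
   giving roughly VAR = &A + I*4 (as a POINTER_PLUS_EXPR) and
   OFF = 16.  */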
729
730 /* Returns the address ADDR of an object in a canonical shape (without nop
731 casts, and with type of pointer to the object). */
732
733 static tree
734 canonicalize_base_object_address (tree addr)
735 {
736 tree orig = addr;
737
738 STRIP_NOPS (addr);
739
740 /* The base address may be obtained by casting from integer, in that case
741 keep the cast. */
742 if (!POINTER_TYPE_P (TREE_TYPE (addr)))
743 return orig;
744
745 if (TREE_CODE (addr) != ADDR_EXPR)
746 return addr;
747
748 return build_fold_addr_expr (TREE_OPERAND (addr, 0));
749 }
750
751 /* Analyzes the behavior of the memory reference DR in the innermost loop or
752 basic block that contains it. Returns true if the analysis succeeded,
753 false otherwise. */
754
755 bool
756 dr_analyze_innermost (struct data_reference *dr, struct loop *nest)
757 {
758 gimple *stmt = DR_STMT (dr);
759 struct loop *loop = loop_containing_stmt (stmt);
760 tree ref = DR_REF (dr);
761 HOST_WIDE_INT pbitsize, pbitpos;
762 tree base, poffset;
763 machine_mode pmode;
764 int punsignedp, pvolatilep;
765 affine_iv base_iv, offset_iv;
766 tree init, dinit, step;
767 bool in_loop = (loop && loop->num);
768
769 if (dump_file && (dump_flags & TDF_DETAILS))
770 fprintf (dump_file, "analyze_innermost: ");
771
772 base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset,
773 &pmode, &punsignedp, &pvolatilep, false);
774 gcc_assert (base != NULL_TREE);
775
776 if (pbitpos % BITS_PER_UNIT != 0)
777 {
778 if (dump_file && (dump_flags & TDF_DETAILS))
779 fprintf (dump_file, "failed: bit offset alignment.\n");
780 return false;
781 }
782
783 if (TREE_CODE (base) == MEM_REF)
784 {
785 if (!integer_zerop (TREE_OPERAND (base, 1)))
786 {
787 offset_int moff = mem_ref_offset (base);
788 tree mofft = wide_int_to_tree (sizetype, moff);
789 if (!poffset)
790 poffset = mofft;
791 else
792 poffset = size_binop (PLUS_EXPR, poffset, mofft);
793 }
794 base = TREE_OPERAND (base, 0);
795 }
796 else
797 base = build_fold_addr_expr (base);
798
799 if (in_loop)
800 {
801 if (!simple_iv (loop, loop_containing_stmt (stmt), base, &base_iv,
802 nest ? true : false))
803 {
804 if (nest)
805 {
806 if (dump_file && (dump_flags & TDF_DETAILS))
807 fprintf (dump_file, "failed: evolution of base is not"
808 " affine.\n");
809 return false;
810 }
811 else
812 {
813 base_iv.base = base;
814 base_iv.step = ssize_int (0);
815 base_iv.no_overflow = true;
816 }
817 }
818 }
819 else
820 {
821 base_iv.base = base;
822 base_iv.step = ssize_int (0);
823 base_iv.no_overflow = true;
824 }
825
826 if (!poffset)
827 {
828 offset_iv.base = ssize_int (0);
829 offset_iv.step = ssize_int (0);
830 }
831 else
832 {
833 if (!in_loop)
834 {
835 offset_iv.base = poffset;
836 offset_iv.step = ssize_int (0);
837 }
838 else if (!simple_iv (loop, loop_containing_stmt (stmt),
839 poffset, &offset_iv,
840 nest ? true : false))
841 {
842 if (nest)
843 {
844 if (dump_file && (dump_flags & TDF_DETAILS))
845 fprintf (dump_file, "failed: evolution of offset is not"
846 " affine.\n");
847 return false;
848 }
849 else
850 {
851 offset_iv.base = poffset;
852 offset_iv.step = ssize_int (0);
853 }
854 }
855 }
856
857 init = ssize_int (pbitpos / BITS_PER_UNIT);
858 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
859 init = size_binop (PLUS_EXPR, init, dinit);
860 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
861 init = size_binop (PLUS_EXPR, init, dinit);
862
863 step = size_binop (PLUS_EXPR,
864 fold_convert (ssizetype, base_iv.step),
865 fold_convert (ssizetype, offset_iv.step));
866
867 DR_BASE_ADDRESS (dr) = canonicalize_base_object_address (base_iv.base);
868
869 DR_OFFSET (dr) = fold_convert (ssizetype, offset_iv.base);
870 DR_INIT (dr) = init;
871 DR_STEP (dr) = step;
872
873 DR_ALIGNED_TO (dr) = size_int (highest_pow2_factor (offset_iv.base));
874
875 if (dump_file && (dump_flags & TDF_DETAILS))
876 fprintf (dump_file, "success.\n");
877
878 return true;
879 }
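/* An illustrative sketch (assuming a 4-byte element type and a loop
   counter I that starts at 0 and steps by 1): for the reference
   A[I + 1] analyzed in its loop, the fields computed above would be
   roughly DR_BASE_ADDRESS = &A, DR_OFFSET = 0, DR_INIT = 4 and
   DR_STEP = 4, i.e. the access starts 4 bytes past the base and
   advances by 4 bytes per iteration.  */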
880
881 /* Determines the base object and the list of indices of memory reference
882 DR, analyzed in LOOP and instantiated in loop nest NEST. */
883
884 static void
885 dr_analyze_indices (struct data_reference *dr, loop_p nest, loop_p loop)
886 {
887 vec<tree> access_fns = vNULL;
888 tree ref, op;
889 tree base, off, access_fn;
890 basic_block before_loop;
891
892 /* If analyzing a basic-block there are no indices to analyze
893 and thus no access functions. */
894 if (!nest)
895 {
896 DR_BASE_OBJECT (dr) = DR_REF (dr);
897 DR_ACCESS_FNS (dr).create (0);
898 return;
899 }
900
901 ref = DR_REF (dr);
902 before_loop = block_before_loop (nest);
903
904 /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
905 into a two element array with a constant index. The base is
906 then just the immediate underlying object. */
907 if (TREE_CODE (ref) == REALPART_EXPR)
908 {
909 ref = TREE_OPERAND (ref, 0);
910 access_fns.safe_push (integer_zero_node);
911 }
912 else if (TREE_CODE (ref) == IMAGPART_EXPR)
913 {
914 ref = TREE_OPERAND (ref, 0);
915 access_fns.safe_push (integer_one_node);
916 }
917
918 /* Analyze access functions of dimensions we know to be independent. */
919 while (handled_component_p (ref))
920 {
921 if (TREE_CODE (ref) == ARRAY_REF)
922 {
923 op = TREE_OPERAND (ref, 1);
924 access_fn = analyze_scalar_evolution (loop, op);
925 access_fn = instantiate_scev (before_loop, loop, access_fn);
926 access_fns.safe_push (access_fn);
927 }
928 else if (TREE_CODE (ref) == COMPONENT_REF
929 && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
930 {
931 /* For COMPONENT_REFs of records (but not unions!) use the
932 FIELD_DECL offset as constant access function so we can
933 disambiguate a[i].f1 and a[i].f2. */
934 tree off = component_ref_field_offset (ref);
935 off = size_binop (PLUS_EXPR,
936 size_binop (MULT_EXPR,
937 fold_convert (bitsizetype, off),
938 bitsize_int (BITS_PER_UNIT)),
939 DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
940 access_fns.safe_push (off);
941 }
942 else
943 /* If we have an unhandled component we could not translate
944 to an access function stop analyzing. We have determined
945 our base object in this case. */
946 break;
947
948 ref = TREE_OPERAND (ref, 0);
949 }
950
951 /* If the address operand of a MEM_REF base has an evolution in the
952 analyzed nest, add it as an additional independent access-function. */
953 if (TREE_CODE (ref) == MEM_REF)
954 {
955 op = TREE_OPERAND (ref, 0);
956 access_fn = analyze_scalar_evolution (loop, op);
957 access_fn = instantiate_scev (before_loop, loop, access_fn);
958 if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
959 {
960 tree orig_type;
961 tree memoff = TREE_OPERAND (ref, 1);
962 base = initial_condition (access_fn);
963 orig_type = TREE_TYPE (base);
964 STRIP_USELESS_TYPE_CONVERSION (base);
965 split_constant_offset (base, &base, &off);
966 STRIP_USELESS_TYPE_CONVERSION (base);
967 /* Fold the MEM_REF offset into the evolutions initial
968 value to make more bases comparable. */
969 if (!integer_zerop (memoff))
970 {
971 off = size_binop (PLUS_EXPR, off,
972 fold_convert (ssizetype, memoff));
973 memoff = build_int_cst (TREE_TYPE (memoff), 0);
974 }
975 /* Adjust the offset so it is a multiple of the access type
976 size and thus we separate bases that can possibly be used
977 to produce partial overlaps (which the access_fn machinery
978 cannot handle). */
979 wide_int rem;
980 if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
981 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
982 && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
983 rem = wi::mod_trunc (off, TYPE_SIZE_UNIT (TREE_TYPE (ref)), SIGNED);
984 else
985 /* If we can't compute the remainder simply force the initial
986 condition to zero. */
987 rem = off;
988 off = wide_int_to_tree (ssizetype, wi::sub (off, rem));
989 memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
990 /* And finally replace the initial condition. */
991 access_fn = chrec_replace_initial_condition
992 (access_fn, fold_convert (orig_type, off));
993 /* ??? This is still not a suitable base object for
994 dr_may_alias_p - the base object needs to be an
995 access that covers the object as whole. With
996 an evolution in the pointer this cannot be
997 guaranteed.
998 As a band-aid, mark the access so we can special-case
999 it in dr_may_alias_p. */
1000 tree old = ref;
1001 ref = fold_build2_loc (EXPR_LOCATION (ref),
1002 MEM_REF, TREE_TYPE (ref),
1003 base, memoff);
1004 MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
1005 MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
1006 DR_UNCONSTRAINED_BASE (dr) = true;
1007 access_fns.safe_push (access_fn);
1008 }
1009 }
1010 else if (DECL_P (ref))
1011 {
1012 /* Canonicalize DR_BASE_OBJECT to MEM_REF form. */
1013 ref = build2 (MEM_REF, TREE_TYPE (ref),
1014 build_fold_addr_expr (ref),
1015 build_int_cst (reference_alias_ptr_type (ref), 0));
1016 }
1017
1018 DR_BASE_OBJECT (dr) = ref;
1019 DR_ACCESS_FNS (dr) = access_fns;
1020 }
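/* For illustration: for a reference like A[I].f1 analyzed in a loop
   nest, the walk above records a constant access function for the
   field offset of f1, then an evolution such as {0, +, 1}_1 for the
   ARRAY_REF index I, and finally wraps the DECL base A into a MEM_REF
   for DR_BASE_OBJECT.  */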
1021
1022 /* Extracts the alias analysis information from the memory reference DR. */
1023
1024 static void
1025 dr_analyze_alias (struct data_reference *dr)
1026 {
1027 tree ref = DR_REF (dr);
1028 tree base = get_base_address (ref), addr;
1029
1030 if (INDIRECT_REF_P (base)
1031 || TREE_CODE (base) == MEM_REF)
1032 {
1033 addr = TREE_OPERAND (base, 0);
1034 if (TREE_CODE (addr) == SSA_NAME)
1035 DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
1036 }
1037 }
1038
1039 /* Frees data reference DR. */
1040
1041 void
1042 free_data_ref (data_reference_p dr)
1043 {
1044 DR_ACCESS_FNS (dr).release ();
1045 free (dr);
1046 }
1047
1048 /* Analyzes memory reference MEMREF accessed in STMT. The reference
1049 is a read if IS_READ is true, a write otherwise. Returns the
1050 data_reference description of MEMREF. NEST is the outermost loop
1051 in which the reference should be instantiated, LOOP is the loop in
1052 which the data reference should be analyzed. */
1053
1054 struct data_reference *
1055 create_data_ref (loop_p nest, loop_p loop, tree memref, gimple *stmt,
1056 bool is_read)
1057 {
1058 struct data_reference *dr;
1059
1060 if (dump_file && (dump_flags & TDF_DETAILS))
1061 {
1062 fprintf (dump_file, "Creating dr for ");
1063 print_generic_expr (dump_file, memref, TDF_SLIM);
1064 fprintf (dump_file, "\n");
1065 }
1066
1067 dr = XCNEW (struct data_reference);
1068 DR_STMT (dr) = stmt;
1069 DR_REF (dr) = memref;
1070 DR_IS_READ (dr) = is_read;
1071
1072 dr_analyze_innermost (dr, nest);
1073 dr_analyze_indices (dr, nest, loop);
1074 dr_analyze_alias (dr);
1075
1076 if (dump_file && (dump_flags & TDF_DETAILS))
1077 {
1078 unsigned i;
1079 fprintf (dump_file, "\tbase_address: ");
1080 print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
1081 fprintf (dump_file, "\n\toffset from base address: ");
1082 print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
1083 fprintf (dump_file, "\n\tconstant offset from base address: ");
1084 print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
1085 fprintf (dump_file, "\n\tstep: ");
1086 print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
1087 fprintf (dump_file, "\n\taligned to: ");
1088 print_generic_expr (dump_file, DR_ALIGNED_TO (dr), TDF_SLIM);
1089 fprintf (dump_file, "\n\tbase_object: ");
1090 print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
1091 fprintf (dump_file, "\n");
1092 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
1093 {
1094 fprintf (dump_file, "\tAccess function %d: ", i);
1095 print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
1096 }
1097 }
1098
1099 return dr;
1100 }
1101
1102 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
1103 expressions. */
1104 static bool
1105 dr_equal_offsets_p1 (tree offset1, tree offset2)
1106 {
1107 bool res;
1108
1109 STRIP_NOPS (offset1);
1110 STRIP_NOPS (offset2);
1111
1112 if (offset1 == offset2)
1113 return true;
1114
1115 if (TREE_CODE (offset1) != TREE_CODE (offset2)
1116 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
1117 return false;
1118
1119 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
1120 TREE_OPERAND (offset2, 0));
1121
1122 if (!res || !BINARY_CLASS_P (offset1))
1123 return res;
1124
1125 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
1126 TREE_OPERAND (offset2, 1));
1127
1128 return res;
1129 }
1130
1131 /* Check if DRA and DRB have equal offsets. */
1132 bool
1133 dr_equal_offsets_p (struct data_reference *dra,
1134 struct data_reference *drb)
1135 {
1136 tree offset1, offset2;
1137
1138 offset1 = DR_OFFSET (dra);
1139 offset2 = DR_OFFSET (drb);
1140
1141 return dr_equal_offsets_p1 (offset1, offset2);
1142 }
1143
1144 /* Returns true if FNA == FNB. */
1145
1146 static bool
1147 affine_function_equal_p (affine_fn fna, affine_fn fnb)
1148 {
1149 unsigned i, n = fna.length ();
1150
1151 if (n != fnb.length ())
1152 return false;
1153
1154 for (i = 0; i < n; i++)
1155 if (!operand_equal_p (fna[i], fnb[i], 0))
1156 return false;
1157
1158 return true;
1159 }
1160
1161 /* If all the functions in CF are the same, returns one of them,
1162 otherwise returns an empty affine_fn. */
1163
1164 static affine_fn
1165 common_affine_function (conflict_function *cf)
1166 {
1167 unsigned i;
1168 affine_fn comm;
1169
1170 if (!CF_NONTRIVIAL_P (cf))
1171 return affine_fn ();
1172
1173 comm = cf->fns[0];
1174
1175 for (i = 1; i < cf->n; i++)
1176 if (!affine_function_equal_p (comm, cf->fns[i]))
1177 return affine_fn ();
1178
1179 return comm;
1180 }
1181
1182 /* Returns the base of the affine function FN. */
1183
1184 static tree
1185 affine_function_base (affine_fn fn)
1186 {
1187 return fn[0];
1188 }
1189
1190 /* Returns true if FN is a constant. */
1191
1192 static bool
1193 affine_function_constant_p (affine_fn fn)
1194 {
1195 unsigned i;
1196 tree coef;
1197
1198 for (i = 1; fn.iterate (i, &coef); i++)
1199 if (!integer_zerop (coef))
1200 return false;
1201
1202 return true;
1203 }
1204
1205 /* Returns true if FN is the zero constant function. */
1206
1207 static bool
1208 affine_function_zero_p (affine_fn fn)
1209 {
1210 return (integer_zerop (affine_function_base (fn))
1211 && affine_function_constant_p (fn));
1212 }
1213
1214 /* Returns a signed integer type with the largest precision from TA
1215 and TB. */
1216
1217 static tree
1218 signed_type_for_types (tree ta, tree tb)
1219 {
1220 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
1221 return signed_type_for (ta);
1222 else
1223 return signed_type_for (tb);
1224 }
1225
1226 /* Applies operation OP on affine functions FNA and FNB, and returns the
1227 result. */
1228
1229 static affine_fn
1230 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
1231 {
1232 unsigned i, n, m;
1233 affine_fn ret;
1234 tree coef;
1235
1236 if (fnb.length () > fna.length ())
1237 {
1238 n = fna.length ();
1239 m = fnb.length ();
1240 }
1241 else
1242 {
1243 n = fnb.length ();
1244 m = fna.length ();
1245 }
1246
1247 ret.create (m);
1248 for (i = 0; i < n; i++)
1249 {
1250 tree type = signed_type_for_types (TREE_TYPE (fna[i]),
1251 TREE_TYPE (fnb[i]));
1252 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
1253 }
1254
1255 for (; fna.iterate (i, &coef); i++)
1256 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
1257 coef, integer_zero_node));
1258 for (; fnb.iterate (i, &coef); i++)
1259 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
1260 integer_zero_node, coef));
1261
1262 return ret;
1263 }
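/* Example: adding the affine functions [1, 2] (i.e. 1 + 2*x_1) and
   [3, 0, 5] (i.e. 3 + 0*x_1 + 5*x_2) with affine_fn_plus below yields
   [4, 2, 5], that is 4 + 2*x_1 + 5*x_2; the shorter function is
   treated as having zero coefficients for the missing dimensions.  */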
1264
1265 /* Returns the sum of affine functions FNA and FNB. */
1266
1267 static affine_fn
1268 affine_fn_plus (affine_fn fna, affine_fn fnb)
1269 {
1270 return affine_fn_op (PLUS_EXPR, fna, fnb);
1271 }
1272
1273 /* Returns the difference of affine functions FNA and FNB. */
1274
1275 static affine_fn
1276 affine_fn_minus (affine_fn fna, affine_fn fnb)
1277 {
1278 return affine_fn_op (MINUS_EXPR, fna, fnb);
1279 }
1280
1281 /* Frees affine function FN. */
1282
1283 static void
1284 affine_fn_free (affine_fn fn)
1285 {
1286 fn.release ();
1287 }
1288
1289 /* Determine the distance for each subscript in the data dependence
1290 relation DDR. */
1291
1292 static void
1293 compute_subscript_distance (struct data_dependence_relation *ddr)
1294 {
1295 conflict_function *cf_a, *cf_b;
1296 affine_fn fn_a, fn_b, diff;
1297
1298 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
1299 {
1300 unsigned int i;
1301
1302 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
1303 {
1304 struct subscript *subscript;
1305
1306 subscript = DDR_SUBSCRIPT (ddr, i);
1307 cf_a = SUB_CONFLICTS_IN_A (subscript);
1308 cf_b = SUB_CONFLICTS_IN_B (subscript);
1309
1310 fn_a = common_affine_function (cf_a);
1311 fn_b = common_affine_function (cf_b);
1312 if (!fn_a.exists () || !fn_b.exists ())
1313 {
1314 SUB_DISTANCE (subscript) = chrec_dont_know;
1315 return;
1316 }
1317 diff = affine_fn_minus (fn_a, fn_b);
1318
1319 if (affine_function_constant_p (diff))
1320 SUB_DISTANCE (subscript) = affine_function_base (diff);
1321 else
1322 SUB_DISTANCE (subscript) = chrec_dont_know;
1323
1324 affine_fn_free (diff);
1325 }
1326 }
1327 }
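/* Example: if both conflict functions of a subscript reduce to the
   common affine functions fn_a (x_1) = x_1 + 2 and fn_b (x_1) = x_1,
   their difference is the constant 2 and SUB_DISTANCE is set to 2;
   when the difference still depends on a loop index, the distance is
   recorded as chrec_dont_know.  */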
1328
1329 /* Returns the conflict function for "unknown". */
1330
1331 static conflict_function *
1332 conflict_fn_not_known (void)
1333 {
1334 conflict_function *fn = XCNEW (conflict_function);
1335 fn->n = NOT_KNOWN;
1336
1337 return fn;
1338 }
1339
1340 /* Returns the conflict function for "independent". */
1341
1342 static conflict_function *
1343 conflict_fn_no_dependence (void)
1344 {
1345 conflict_function *fn = XCNEW (conflict_function);
1346 fn->n = NO_DEPENDENCE;
1347
1348 return fn;
1349 }
1350
1351 /* Returns true if the address of OBJ is invariant in LOOP. */
1352
1353 static bool
1354 object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
1355 {
1356 while (handled_component_p (obj))
1357 {
1358 if (TREE_CODE (obj) == ARRAY_REF)
1359 {
1360 /* Index of the ARRAY_REF was zeroed in analyze_indices, thus we only
1361 need to check the stride and the lower bound of the reference. */
1362 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
1363 loop->num)
1364 || chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 3),
1365 loop->num))
1366 return false;
1367 }
1368 else if (TREE_CODE (obj) == COMPONENT_REF)
1369 {
1370 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
1371 loop->num))
1372 return false;
1373 }
1374 obj = TREE_OPERAND (obj, 0);
1375 }
1376
1377 if (!INDIRECT_REF_P (obj)
1378 && TREE_CODE (obj) != MEM_REF)
1379 return true;
1380
1381 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
1382 loop->num);
1383 }
1384
1385 /* Returns false if we can prove that data references A and B do not alias,
1386 true otherwise. If LOOP_NEST is false no cross-iteration aliases are
1387 considered. */
1388
1389 bool
1390 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
1391 bool loop_nest)
1392 {
1393 tree addr_a = DR_BASE_OBJECT (a);
1394 tree addr_b = DR_BASE_OBJECT (b);
1395
1396 /* If we are not processing a loop nest but scalar code we
1397 do not need to care about possible cross-iteration dependences
1398 and thus can process the full original reference. Do so,
1399 similar to how loop invariant motion applies extra offset-based
1400 disambiguation. */
1401 if (!loop_nest)
1402 {
1403 aff_tree off1, off2;
1404 widest_int size1, size2;
1405 get_inner_reference_aff (DR_REF (a), &off1, &size1);
1406 get_inner_reference_aff (DR_REF (b), &off2, &size2);
1407 aff_combination_scale (&off1, -1);
1408 aff_combination_add (&off2, &off1);
1409 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1410 return false;
1411 }
1412
1413 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF)
1414 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF)
1415 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b)
1416 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b))
1417 return false;
1418
1419 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
1420 do not know the size of the base-object. So we cannot do any
1421 offset/overlap based analysis but have to rely on points-to
1422 information only. */
1423 if (TREE_CODE (addr_a) == MEM_REF
1424 && (DR_UNCONSTRAINED_BASE (a)
1425 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
1426 {
1427 /* For true dependences we can apply TBAA. */
1428 if (flag_strict_aliasing
1429 && DR_IS_WRITE (a) && DR_IS_READ (b)
1430 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
1431 get_alias_set (DR_REF (b))))
1432 return false;
1433 if (TREE_CODE (addr_b) == MEM_REF)
1434 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
1435 TREE_OPERAND (addr_b, 0));
1436 else
1437 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
1438 build_fold_addr_expr (addr_b));
1439 }
1440 else if (TREE_CODE (addr_b) == MEM_REF
1441 && (DR_UNCONSTRAINED_BASE (b)
1442 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
1443 {
1444 /* For true dependences we can apply TBAA. */
1445 if (flag_strict_aliasing
1446 && DR_IS_WRITE (a) && DR_IS_READ (b)
1447 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
1448 get_alias_set (DR_REF (b))))
1449 return false;
1450 if (TREE_CODE (addr_a) == MEM_REF)
1451 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
1452 TREE_OPERAND (addr_b, 0));
1453 else
1454 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a),
1455 TREE_OPERAND (addr_b, 0));
1456 }
1457
1458 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
1459 that is being subsetted in the loop nest. */
1460 if (DR_IS_WRITE (a) && DR_IS_WRITE (b))
1461 return refs_output_dependent_p (addr_a, addr_b);
1462 else if (DR_IS_READ (a) && DR_IS_WRITE (b))
1463 return refs_anti_dependent_p (addr_a, addr_b);
1464 return refs_may_alias_p (addr_a, addr_b);
1465 }
1466
1467 /* Initialize a data dependence relation between data accesses A and
1468 B. LOOP_NEST is the vector of loops surrounding the references; its
1469 length gives the size of the classic distance/direction vectors. */
1470
1471 struct data_dependence_relation *
1472 initialize_data_dependence_relation (struct data_reference *a,
1473 struct data_reference *b,
1474 vec<loop_p> loop_nest)
1475 {
1476 struct data_dependence_relation *res;
1477 unsigned int i;
1478
1479 res = XNEW (struct data_dependence_relation);
1480 DDR_A (res) = a;
1481 DDR_B (res) = b;
1482 DDR_LOOP_NEST (res).create (0);
1483 DDR_REVERSED_P (res) = false;
1484 DDR_SUBSCRIPTS (res).create (0);
1485 DDR_DIR_VECTS (res).create (0);
1486 DDR_DIST_VECTS (res).create (0);
1487
1488 if (a == NULL || b == NULL)
1489 {
1490 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1491 return res;
1492 }
1493
1494 /* If the data references do not alias, then they are independent. */
1495 if (!dr_may_alias_p (a, b, loop_nest.exists ()))
1496 {
1497 DDR_ARE_DEPENDENT (res) = chrec_known;
1498 return res;
1499 }
1500
1501 /* The case where the references are exactly the same. */
1502 if (operand_equal_p (DR_REF (a), DR_REF (b), 0))
1503 {
1504 if (loop_nest.exists ()
1505 && !object_address_invariant_in_loop_p (loop_nest[0],
1506 DR_BASE_OBJECT (a)))
1507 {
1508 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1509 return res;
1510 }
1511 DDR_AFFINE_P (res) = true;
1512 DDR_ARE_DEPENDENT (res) = NULL_TREE;
1513 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
1514 DDR_LOOP_NEST (res) = loop_nest;
1515 DDR_INNER_LOOP (res) = 0;
1516 DDR_SELF_REFERENCE (res) = true;
1517 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
1518 {
1519 struct subscript *subscript;
1520
1521 subscript = XNEW (struct subscript);
1522 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
1523 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
1524 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
1525 SUB_DISTANCE (subscript) = chrec_dont_know;
1526 DDR_SUBSCRIPTS (res).safe_push (subscript);
1527 }
1528 return res;
1529 }
1530
1531 /* If the references do not access the same object, we do not know
1532 whether they alias or not. */
1533 if (!operand_equal_p (DR_BASE_OBJECT (a), DR_BASE_OBJECT (b), 0))
1534 {
1535 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1536 return res;
1537 }
1538
1539 /* If the base of the object is not invariant in the loop nest, we cannot
1540 analyze it. TODO -- in fact, it would suffice to record that there may
1541 be arbitrary dependences in the loops where the base object varies. */
1542 if (loop_nest.exists ()
1543 && !object_address_invariant_in_loop_p (loop_nest[0],
1544 DR_BASE_OBJECT (a)))
1545 {
1546 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1547 return res;
1548 }
1549
1550 /* If the numbers of dimensions of the accesses do not agree we can have
1551 a pointer access to a component of the array element type and an
1552 array access while the base-objects are still the same. Punt. */
1553 if (DR_NUM_DIMENSIONS (a) != DR_NUM_DIMENSIONS (b))
1554 {
1555 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1556 return res;
1557 }
1558
1559 DDR_AFFINE_P (res) = true;
1560 DDR_ARE_DEPENDENT (res) = NULL_TREE;
1561 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
1562 DDR_LOOP_NEST (res) = loop_nest;
1563 DDR_INNER_LOOP (res) = 0;
1564 DDR_SELF_REFERENCE (res) = false;
1565
1566 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
1567 {
1568 struct subscript *subscript;
1569
1570 subscript = XNEW (struct subscript);
1571 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
1572 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
1573 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
1574 SUB_DISTANCE (subscript) = chrec_dont_know;
1575 DDR_SUBSCRIPTS (res).safe_push (subscript);
1576 }
1577
1578 return res;
1579 }
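/* For illustration: for two references A[I] and A[I + 1] over the same
   array in a common loop nest (assuming the base address is loop
   invariant), the code above reaches the final block: one subscript is
   created per dimension, DDR_ARE_DEPENDENT stays NULL_TREE, and the
   actual overlap analysis is left to the subscript dependence tests
   below.  */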
1580
1581 /* Frees memory used by the conflict function F. */
1582
1583 static void
1584 free_conflict_function (conflict_function *f)
1585 {
1586 unsigned i;
1587
1588 if (CF_NONTRIVIAL_P (f))
1589 {
1590 for (i = 0; i < f->n; i++)
1591 affine_fn_free (f->fns[i]);
1592 }
1593 free (f);
1594 }
1595
1596 /* Frees memory used by SUBSCRIPTS. */
1597
1598 static void
1599 free_subscripts (vec<subscript_p> subscripts)
1600 {
1601 unsigned i;
1602 subscript_p s;
1603
1604 FOR_EACH_VEC_ELT (subscripts, i, s)
1605 {
1606 free_conflict_function (s->conflicting_iterations_in_a);
1607 free_conflict_function (s->conflicting_iterations_in_b);
1608 free (s);
1609 }
1610 subscripts.release ();
1611 }
1612
1613 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
1614 description. */
1615
1616 static inline void
1617 finalize_ddr_dependent (struct data_dependence_relation *ddr,
1618 tree chrec)
1619 {
1620 DDR_ARE_DEPENDENT (ddr) = chrec;
1621 free_subscripts (DDR_SUBSCRIPTS (ddr));
1622 DDR_SUBSCRIPTS (ddr).create (0);
1623 }
1624
1625 /* The dependence relation DDR cannot be represented by a distance
1626 vector. */
1627
1628 static inline void
1629 non_affine_dependence_relation (struct data_dependence_relation *ddr)
1630 {
1631 if (dump_file && (dump_flags & TDF_DETAILS))
1632 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");
1633
1634 DDR_AFFINE_P (ddr) = false;
1635 }
1636
1637 \f
1638
1639 /* This section contains the classic Banerjee tests. */
1640
1641 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
1642 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
1643
1644 static inline bool
1645 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
1646 {
1647 return (evolution_function_is_constant_p (chrec_a)
1648 && evolution_function_is_constant_p (chrec_b));
1649 }
1650
1651 /* Returns true iff CHREC_A and CHREC_B are dependent on an index
1652 variable, i.e., if the SIV (Single Index Variable) test is true. */
1653
1654 static bool
1655 siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
1656 {
1657 if ((evolution_function_is_constant_p (chrec_a)
1658 && evolution_function_is_univariate_p (chrec_b))
1659 || (evolution_function_is_constant_p (chrec_b)
1660 && evolution_function_is_univariate_p (chrec_a)))
1661 return true;
1662
1663 if (evolution_function_is_univariate_p (chrec_a)
1664 && evolution_function_is_univariate_p (chrec_b))
1665 {
1666 switch (TREE_CODE (chrec_a))
1667 {
1668 case POLYNOMIAL_CHREC:
1669 switch (TREE_CODE (chrec_b))
1670 {
1671 case POLYNOMIAL_CHREC:
1672 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
1673 return false;
1674
1675 default:
1676 return true;
1677 }
1678
1679 default:
1680 return true;
1681 }
1682 }
1683
1684 return false;
1685 }
1686
1687 /* Creates a conflict function with N dimensions. The affine functions
1688 in each dimension follow. */
1689
1690 static conflict_function *
1691 conflict_fn (unsigned n, ...)
1692 {
1693 unsigned i;
1694 conflict_function *ret = XCNEW (conflict_function);
1695 va_list ap;
1696
1697 gcc_assert (0 < n && n <= MAX_DIM);
1698 va_start (ap, n);
1699
1700 ret->n = n;
1701 for (i = 0; i < n; i++)
1702 ret->fns[i] = va_arg (ap, affine_fn);
1703 va_end (ap);
1704
1705 return ret;
1706 }
1707
1708 /* Returns constant affine function with value CST. */
1709
1710 static affine_fn
1711 affine_fn_cst (tree cst)
1712 {
1713 affine_fn fn;
1714 fn.create (1);
1715 fn.quick_push (cst);
1716 return fn;
1717 }
1718
1719 /* Returns affine function with single variable, CST + COEF * x_DIM. */
1720
1721 static affine_fn
1722 affine_fn_univar (tree cst, unsigned dim, tree coef)
1723 {
1724 affine_fn fn;
1725 fn.create (dim + 1);
1726 unsigned i;
1727
1728 gcc_assert (dim > 0);
1729 fn.quick_push (cst);
1730 for (i = 1; i < dim; i++)
1731 fn.quick_push (integer_zero_node);
1732 fn.quick_push (coef);
1733 return fn;
1734 }
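/* Example: affine_fn_univar (cst, 2, coef) builds the vector
   [cst, 0, coef], i.e. the function cst + 0*x_1 + coef*x_2, while
   affine_fn_cst (cst) builds [cst], the constant function cst.  */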
1735
1736 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and
1737 *OVERLAPS_B are initialized to the functions that describe the
1738 relation between the elements accessed twice by CHREC_A and
1739 CHREC_B. For k >= 0, the following property is verified:
1740
1741 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
1742
1743 static void
1744 analyze_ziv_subscript (tree chrec_a,
1745 tree chrec_b,
1746 conflict_function **overlaps_a,
1747 conflict_function **overlaps_b,
1748 tree *last_conflicts)
1749 {
1750 tree type, difference;
1751 dependence_stats.num_ziv++;
1752
1753 if (dump_file && (dump_flags & TDF_DETAILS))
1754 fprintf (dump_file, "(analyze_ziv_subscript \n");
1755
1756 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
1757 chrec_a = chrec_convert (type, chrec_a, NULL);
1758 chrec_b = chrec_convert (type, chrec_b, NULL);
1759 difference = chrec_fold_minus (type, chrec_a, chrec_b);
1760
1761 switch (TREE_CODE (difference))
1762 {
1763 case INTEGER_CST:
1764 if (integer_zerop (difference))
1765 {
1766 /* The difference is equal to zero: the accessed index
1767 overlaps for each iteration in the loop. */
1768 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
1769 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
1770 *last_conflicts = chrec_dont_know;
1771 dependence_stats.num_ziv_dependent++;
1772 }
1773 else
1774 {
1775 /* The accesses do not overlap. */
1776 *overlaps_a = conflict_fn_no_dependence ();
1777 *overlaps_b = conflict_fn_no_dependence ();
1778 *last_conflicts = integer_zero_node;
1779 dependence_stats.num_ziv_independent++;
1780 }
1781 break;
1782
1783 default:
1784 /* We're not sure whether the indexes overlap. For the moment,
1785 conservatively answer "don't know". */
1786 if (dump_file && (dump_flags & TDF_DETAILS))
1787 fprintf (dump_file, "ziv test failed: difference is non-integer.\n");
1788
1789 *overlaps_a = conflict_fn_not_known ();
1790 *overlaps_b = conflict_fn_not_known ();
1791 *last_conflicts = chrec_dont_know;
1792 dependence_stats.num_ziv_unimplemented++;
1793 break;
1794 }
1795
1796 if (dump_file && (dump_flags & TDF_DETAILS))
1797 fprintf (dump_file, ")\n");
1798 }
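/* ZIV example: for the loop-invariant subscripts chrec_a = 3 and
   chrec_b = 5 the difference is the nonzero constant -2, so the
   accesses never overlap; for chrec_a = chrec_b = 3 the difference is
   0 and the same element is accessed in every iteration.  */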
1799
1800 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
1801 and only if it fits the unsigned int type. If this is not the case, or the
1802 bound on the number of iterations of LOOP could not be derived, returns
1803 chrec_dont_know. */
1804
1805 static tree
1806 max_stmt_executions_tree (struct loop *loop)
1807 {
1808 widest_int nit;
1809
1810 if (!max_stmt_executions (loop, &nit))
1811 return chrec_dont_know;
1812
1813 if (!wi::fits_to_tree_p (nit, unsigned_type_node))
1814 return chrec_dont_know;
1815
1816 return wide_int_to_tree (unsigned_type_node, nit);
1817 }
1818
1819 /* Determine whether the CHREC is always positive/negative. If the expression
1820 cannot be statically analyzed, return false, otherwise set the answer into
1821 VALUE. */
1822
1823 static bool
1824 chrec_is_positive (tree chrec, bool *value)
1825 {
1826 bool value0, value1, value2;
1827 tree end_value, nb_iter;
1828
1829 switch (TREE_CODE (chrec))
1830 {
1831 case POLYNOMIAL_CHREC:
1832 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
1833 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
1834 return false;
1835
1836 /* FIXME -- overflows. */
1837 if (value0 == value1)
1838 {
1839 *value = value0;
1840 return true;
1841 }
1842
1843 /* Otherwise the chrec is of the form "{-197, +, 2}_1",
1844 and the proof consists of showing that the sign never
1845 changes during the execution of the loop, from 0 to
1846 loop->nb_iterations. */
1847 if (!evolution_function_is_affine_p (chrec))
1848 return false;
1849
1850 nb_iter = number_of_latch_executions (get_chrec_loop (chrec));
1851 if (chrec_contains_undetermined (nb_iter))
1852 return false;
1853
1854 #if 0
1855 /* TODO -- If the test is after the exit, we may decrease the number of
1856 iterations by one. */
1857 if (after_exit)
1858 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1));
1859 #endif
1860
1861 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
1862
1863 if (!chrec_is_positive (end_value, &value2))
1864 return false;
1865
1866 *value = value0;
1867 return value0 == value2;
1868
1869 case INTEGER_CST:
1870 switch (tree_int_cst_sgn (chrec))
1871 {
1872 case -1:
1873 *value = false;
1874 break;
1875 case 1:
1876 *value = true;
1877 break;
1878 default:
1879 return false;
1880 }
1881 return true;
1882
1883 default:
1884 return false;
1885 }
1886 }
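/* Example for the affine case above: for chrec = {-197, +, 2}_1 and a
   loop whose latch executes 10 times, the end value is
   -197 + 2*10 = -177, still negative, so the sign is constant over the
   loop: *VALUE is set to false and true is returned.  Were the latch
   to execute 100 times, the end value 3 would be positive, the sign
   would change, and false would be returned.  */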
1887
1888
1889 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
1890 constant, and CHREC_B is an affine function. *OVERLAPS_A and
1891 *OVERLAPS_B are initialized to the functions that describe the
1892 relation between the elements accessed twice by CHREC_A and
1893 CHREC_B. For k >= 0, the following property is verified:
1894
1895 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
1896
1897 static void
1898 analyze_siv_subscript_cst_affine (tree chrec_a,
1899 tree chrec_b,
1900 conflict_function **overlaps_a,
1901 conflict_function **overlaps_b,
1902 tree *last_conflicts)
1903 {
1904 bool value0, value1, value2;
1905 tree type, difference, tmp;
1906
1907 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
1908 chrec_a = chrec_convert (type, chrec_a, NULL);
1909 chrec_b = chrec_convert (type, chrec_b, NULL);
1910 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
1911
1912 /* Special case overlap in the first iteration. */
1913 if (integer_zerop (difference))
1914 {
1915 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
1916 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
1917 *last_conflicts = integer_one_node;
1918 return;
1919 }
1920
1921 if (!chrec_is_positive (initial_condition (difference), &value0))
1922 {
1923 if (dump_file && (dump_flags & TDF_DETAILS))
1924 fprintf (dump_file, "siv test failed: chrec is not positive.\n");
1925
1926 dependence_stats.num_siv_unimplemented++;
1927 *overlaps_a = conflict_fn_not_known ();
1928 *overlaps_b = conflict_fn_not_known ();
1929 *last_conflicts = chrec_dont_know;
1930 return;
1931 }
1932 else
1933 {
1934 if (value0 == false)
1935 {
1936 if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value1))
1937 {
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1939 fprintf (dump_file, "siv test failed: chrec not positive.\n");
1940
1941 *overlaps_a = conflict_fn_not_known ();
1942 *overlaps_b = conflict_fn_not_known ();
1943 *last_conflicts = chrec_dont_know;
1944 dependence_stats.num_siv_unimplemented++;
1945 return;
1946 }
1947 else
1948 {
1949 if (value1 == true)
1950 {
1951 /* Example:
1952 chrec_a = 12
1953 chrec_b = {10, +, 1}
1954 */
1955
1956 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
1957 {
1958 HOST_WIDE_INT numiter;
1959 struct loop *loop = get_chrec_loop (chrec_b);
1960
1961 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
1962 tmp = fold_build2 (EXACT_DIV_EXPR, type,
1963 fold_build1 (ABS_EXPR, type, difference),
1964 CHREC_RIGHT (chrec_b));
1965 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
1966 *last_conflicts = integer_one_node;
1967
1968
1969 /* Perform weak-zero siv test to see if overlap is
1970 outside the loop bounds. */
1971 numiter = max_stmt_executions_int (loop);
1972
1973 if (numiter >= 0
1974 && compare_tree_int (tmp, numiter) > 0)
1975 {
1976 free_conflict_function (*overlaps_a);
1977 free_conflict_function (*overlaps_b);
1978 *overlaps_a = conflict_fn_no_dependence ();
1979 *overlaps_b = conflict_fn_no_dependence ();
1980 *last_conflicts = integer_zero_node;
1981 dependence_stats.num_siv_independent++;
1982 return;
1983 }
1984 dependence_stats.num_siv_dependent++;
1985 return;
1986 }
1987
1988 /* When the step does not divide the difference, there are
1989 no overlaps. */
1990 else
1991 {
1992 *overlaps_a = conflict_fn_no_dependence ();
1993 *overlaps_b = conflict_fn_no_dependence ();
1994 *last_conflicts = integer_zero_node;
1995 dependence_stats.num_siv_independent++;
1996 return;
1997 }
1998 }
1999
2000 else
2001 {
2002 /* Example:
2003 chrec_a = 12
2004 chrec_b = {10, +, -1}
2005
2006 In this case, chrec_a will not overlap with chrec_b. */
2007 *overlaps_a = conflict_fn_no_dependence ();
2008 *overlaps_b = conflict_fn_no_dependence ();
2009 *last_conflicts = integer_zero_node;
2010 dependence_stats.num_siv_independent++;
2011 return;
2012 }
2013 }
2014 }
2015 else
2016 {
2017 if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
2018 {
2019 if (dump_file && (dump_flags & TDF_DETAILS))
2020 fprintf (dump_file, "siv test failed: chrec not positive.\n");
2021
2022 *overlaps_a = conflict_fn_not_known ();
2023 *overlaps_b = conflict_fn_not_known ();
2024 *last_conflicts = chrec_dont_know;
2025 dependence_stats.num_siv_unimplemented++;
2026 return;
2027 }
2028 else
2029 {
2030 if (value2 == false)
2031 {
2032 /* Example:
2033 chrec_a = 3
2034 chrec_b = {10, +, -1}
2035 */
2036 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
2037 {
2038 HOST_WIDE_INT numiter;
2039 struct loop *loop = get_chrec_loop (chrec_b);
2040
2041 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
2042 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
2043 CHREC_RIGHT (chrec_b));
2044 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
2045 *last_conflicts = integer_one_node;
2046
2047 /* Perform weak-zero siv test to see if overlap is
2048 outside the loop bounds. */
2049 numiter = max_stmt_executions_int (loop);
2050
2051 if (numiter >= 0
2052 && compare_tree_int (tmp, numiter) > 0)
2053 {
2054 free_conflict_function (*overlaps_a);
2055 free_conflict_function (*overlaps_b);
2056 *overlaps_a = conflict_fn_no_dependence ();
2057 *overlaps_b = conflict_fn_no_dependence ();
2058 *last_conflicts = integer_zero_node;
2059 dependence_stats.num_siv_independent++;
2060 return;
2061 }
2062 dependence_stats.num_siv_dependent++;
2063 return;
2064 }
2065
2066 /* When the step does not divide the difference, there
2067 are no overlaps. */
2068 else
2069 {
2070 *overlaps_a = conflict_fn_no_dependence ();
2071 *overlaps_b = conflict_fn_no_dependence ();
2072 *last_conflicts = integer_zero_node;
2073 dependence_stats.num_siv_independent++;
2074 return;
2075 }
2076 }
2077 else
2078 {
2079 /* Example:
2080 chrec_a = 3
2081 chrec_b = {4, +, 1}
2082
2083 In this case, chrec_a will not overlap with chrec_b. */
2084 *overlaps_a = conflict_fn_no_dependence ();
2085 *overlaps_b = conflict_fn_no_dependence ();
2086 *last_conflicts = integer_zero_node;
2087 dependence_stats.num_siv_independent++;
2088 return;
2089 }
2090 }
2091 }
2092 }
2093 }
2094
2095 /* Helper recursive function for initializing the matrix A. Returns
2096 the initial value of CHREC. */
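/* For illustration: for CHREC = {{3, +, 4}_1, +, 6}_2 with INDEX = 0
   and MULT = 1, the call stores 6 in A[0][0] and 4 in A[1][0], and
   returns the initial value 3.  */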
2097
2098 static tree
2099 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
2100 {
2101 gcc_assert (chrec);
2102
2103 switch (TREE_CODE (chrec))
2104 {
2105 case POLYNOMIAL_CHREC:
2106 gcc_assert (TREE_CODE (CHREC_RIGHT (chrec)) == INTEGER_CST);
2107
2108 A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec));
2109 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult);
2110
2111 case PLUS_EXPR:
2112 case MULT_EXPR:
2113 case MINUS_EXPR:
2114 {
2115 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
2116 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult);
2117
2118 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1);
2119 }
2120
2121 CASE_CONVERT:
2122 {
2123 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
2124 return chrec_convert (chrec_type (chrec), op, NULL);
2125 }
2126
2127 case BIT_NOT_EXPR:
2128 {
2129 /* Handle ~X as -1 - X. */
2130 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
2131 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec),
2132 build_int_cst (TREE_TYPE (chrec), -1), op);
2133 }
2134
2135 case INTEGER_CST:
2136 return chrec;
2137
2138 default:
2139 gcc_unreachable ();
2140 return NULL_TREE;
2141 }
2142 }
2143
2144 #define FLOOR_DIV(x,y) ((x) / (y))
2145
2146 /* Solves the special case of the Diophantine equation:
2147 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
2148
2149 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
2150 number of iterations that loops X and Y run. The overlaps will be
2151 constructed as evolutions in dimension DIM. */
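/* A sketch of the intended result, assuming NITER is large enough:
   for STEP_A = 2 and STEP_B = 3, gcd (2, 3) = 1, so *OVERLAPS_A is
   {0, +, 3}_DIM and *OVERLAPS_B is {0, +, 2}_DIM: iteration 3*k of
   loop X and iteration 2*k of loop Y access the same element, since
   2 * (3*k) = 3 * (2*k).  */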
2152
2153 static void
2154 compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
2155 affine_fn *overlaps_a,
2156 affine_fn *overlaps_b,
2157 tree *last_conflicts, int dim)
2158 {
2159 if (((step_a > 0 && step_b > 0)
2160 || (step_a < 0 && step_b < 0)))
2161 {
2162 int step_overlaps_a, step_overlaps_b;
2163 int gcd_steps_a_b, last_conflict, tau2;
2164
2165 gcd_steps_a_b = gcd (step_a, step_b);
2166 step_overlaps_a = step_b / gcd_steps_a_b;
2167 step_overlaps_b = step_a / gcd_steps_a_b;
2168
2169 if (niter > 0)
2170 {
2171 tau2 = FLOOR_DIV (niter, step_overlaps_a);
2172 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
2173 last_conflict = tau2;
2174 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
2175 }
2176 else
2177 *last_conflicts = chrec_dont_know;
2178
2179 *overlaps_a = affine_fn_univar (integer_zero_node, dim,
2180 build_int_cst (NULL_TREE,
2181 step_overlaps_a));
2182 *overlaps_b = affine_fn_univar (integer_zero_node, dim,
2183 build_int_cst (NULL_TREE,
2184 step_overlaps_b));
2185 }
2186
2187 else
2188 {
2189 *overlaps_a = affine_fn_cst (integer_zero_node);
2190 *overlaps_b = affine_fn_cst (integer_zero_node);
2191 *last_conflicts = integer_zero_node;
2192 }
2193 }
2194
2195 /* Solves the special case of a Diophantine equation where CHREC_A is
2196 an affine bivariate function, and CHREC_B is an affine univariate
2197 function. For example,
2198
2199 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z
2200
2201 has the following overlapping functions:
2202
2203 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
2204 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
2205 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v
2206
2207 FORNOW: This is a specialized implementation for a case occurring in
2208 a common benchmark. Implement the general algorithm. */
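/* To check the example above: x (t, u, v) = 1336*t + v,
   y (t, u, v) = 1336*u + v and z (t, u, v) = t + 1335*u + v, so that
   {{0, +, 1}_x, +, 1335}_y evaluated at (x, y) gives
   (1336*t + v) + 1335 * (1336*u + v) = 1336 * (t + 1335*u + v),
   which is {0, +, 1336}_z evaluated at z (t, u, v).  */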
2209
2210 static void
2211 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
2212 conflict_function **overlaps_a,
2213 conflict_function **overlaps_b,
2214 tree *last_conflicts)
2215 {
2216 bool xz_p, yz_p, xyz_p;
2217 int step_x, step_y, step_z;
2218 HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
2219 affine_fn overlaps_a_xz, overlaps_b_xz;
2220 affine_fn overlaps_a_yz, overlaps_b_yz;
2221 affine_fn overlaps_a_xyz, overlaps_b_xyz;
2222 affine_fn ova1, ova2, ovb;
2223 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;
2224
2225 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
2226 step_y = int_cst_value (CHREC_RIGHT (chrec_a));
2227 step_z = int_cst_value (CHREC_RIGHT (chrec_b));
2228
2229 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
2230 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
2231 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));
2232
2233 if (niter_x < 0 || niter_y < 0 || niter_z < 0)
2234 {
2235 if (dump_file && (dump_flags & TDF_DETAILS))
2236 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");
2237
2238 *overlaps_a = conflict_fn_not_known ();
2239 *overlaps_b = conflict_fn_not_known ();
2240 *last_conflicts = chrec_dont_know;
2241 return;
2242 }
2243
2244 niter = MIN (niter_x, niter_z);
2245 compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
2246 &overlaps_a_xz,
2247 &overlaps_b_xz,
2248 &last_conflicts_xz, 1);
2249 niter = MIN (niter_y, niter_z);
2250 compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
2251 &overlaps_a_yz,
2252 &overlaps_b_yz,
2253 &last_conflicts_yz, 2);
2254 niter = MIN (niter_x, niter_z);
2255 niter = MIN (niter_y, niter);
2256 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
2257 &overlaps_a_xyz,
2258 &overlaps_b_xyz,
2259 &last_conflicts_xyz, 3);
2260
2261 xz_p = !integer_zerop (last_conflicts_xz);
2262 yz_p = !integer_zerop (last_conflicts_yz);
2263 xyz_p = !integer_zerop (last_conflicts_xyz);
2264
2265 if (xz_p || yz_p || xyz_p)
2266 {
2267 ova1 = affine_fn_cst (integer_zero_node);
2268 ova2 = affine_fn_cst (integer_zero_node);
2269 ovb = affine_fn_cst (integer_zero_node);
2270 if (xz_p)
2271 {
2272 affine_fn t0 = ova1;
2273 affine_fn t2 = ovb;
2274
2275 ova1 = affine_fn_plus (ova1, overlaps_a_xz);
2276 ovb = affine_fn_plus (ovb, overlaps_b_xz);
2277 affine_fn_free (t0);
2278 affine_fn_free (t2);
2279 *last_conflicts = last_conflicts_xz;
2280 }
2281 if (yz_p)
2282 {
2283 affine_fn t0 = ova2;
2284 affine_fn t2 = ovb;
2285
2286 ova2 = affine_fn_plus (ova2, overlaps_a_yz);
2287 ovb = affine_fn_plus (ovb, overlaps_b_yz);
2288 affine_fn_free (t0);
2289 affine_fn_free (t2);
2290 *last_conflicts = last_conflicts_yz;
2291 }
2292 if (xyz_p)
2293 {
2294 affine_fn t0 = ova1;
2295 affine_fn t2 = ova2;
2296 affine_fn t4 = ovb;
2297
2298 ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
2299 ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
2300 ovb = affine_fn_plus (ovb, overlaps_b_xyz);
2301 affine_fn_free (t0);
2302 affine_fn_free (t2);
2303 affine_fn_free (t4);
2304 *last_conflicts = last_conflicts_xyz;
2305 }
2306 *overlaps_a = conflict_fn (2, ova1, ova2);
2307 *overlaps_b = conflict_fn (1, ovb);
2308 }
2309 else
2310 {
2311 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
2312 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
2313 *last_conflicts = integer_zero_node;
2314 }
2315
2316 affine_fn_free (overlaps_a_xz);
2317 affine_fn_free (overlaps_b_xz);
2318 affine_fn_free (overlaps_a_yz);
2319 affine_fn_free (overlaps_b_yz);
2320 affine_fn_free (overlaps_a_xyz);
2321 affine_fn_free (overlaps_b_xyz);
2322 }
2323
2324 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */
2325
2326 static void
2327 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
2328 int size)
2329 {
2330 memcpy (vec2, vec1, size * sizeof (*vec1));
2331 }
2332
2333 /* Copy the elements of M x N matrix MAT1 to MAT2. */
2334
2335 static void
2336 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
2337 int m, int n)
2338 {
2339 int i;
2340
2341 for (i = 0; i < m; i++)
2342 lambda_vector_copy (mat1[i], mat2[i], n);
2343 }
2344
2345 /* Store the SIZE x SIZE identity matrix in MAT. */
2346
2347 static void
2348 lambda_matrix_id (lambda_matrix mat, int size)
2349 {
2350 int i, j;
2351
2352 for (i = 0; i < size; i++)
2353 for (j = 0; j < size; j++)
2354 mat[i][j] = (i == j) ? 1 : 0;
2355 }
2356
2357 /* Return the index of the first nonzero element of vector VEC1 between
2358 START and N. We must have START <= N. Returns N if VEC1 is the zero vector. */
2359
2360 static int
2361 lambda_vector_first_nz (lambda_vector vec1, int n, int start)
2362 {
2363 int j = start;
2364 while (j < n && vec1[j] == 0)
2365 j++;
2366 return j;
2367 }
2368
2369 /* Add a multiple of row R1 of matrix MAT with N columns to row R2:
2370 R2 = R2 + CONST1 * R1. */
2371
2372 static void
2373 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2, int const1)
2374 {
2375 int i;
2376
2377 if (const1 == 0)
2378 return;
2379
2380 for (i = 0; i < n; i++)
2381 mat[r2][i] += const1 * mat[r1][i];
2382 }
2383
2384 /* Multiply vector VEC1 of length SIZE by a constant CONST1,
2385 and store the result in VEC2. */
2386
2387 static void
2388 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
2389 int size, int const1)
2390 {
2391 int i;
2392
2393 if (const1 == 0)
2394 lambda_vector_clear (vec2, size);
2395 else
2396 for (i = 0; i < size; i++)
2397 vec2[i] = const1 * vec1[i];
2398 }
2399
2400 /* Negate vector VEC1 with length SIZE and store it in VEC2. */
2401
2402 static void
2403 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
2404 int size)
2405 {
2406 lambda_vector_mult_const (vec1, vec2, size, -1);
2407 }
2408
2409 /* Negate row R1 of matrix MAT which has N columns. */
2410
2411 static void
2412 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
2413 {
2414 lambda_vector_negate (mat[r1], mat[r1], n);
2415 }
2416
2417 /* Return true if two vectors are equal. */
2418
2419 static bool
2420 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
2421 {
2422 int i;
2423 for (i = 0; i < size; i++)
2424 if (vec1[i] != vec2[i])
2425 return false;
2426 return true;
2427 }
2428
2429 /* Given an M x N integer matrix A, this function determines an M x
2430 M unimodular matrix U, and an M x N echelon matrix S such that
2431 "U.A = S". This decomposition is also known as "right Hermite".
2432
2433 Ref: Algorithm 2.1 page 33 in "Loop Transformations for
2434 Restructuring Compilers" Utpal Banerjee. */
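/* One possible decomposition (the exact U depends on the order of the
   row eliminations): for the 2 x 1 matrix A = (4, -6)^T, taking
   U = ((2, 1), (3, 2)), with det U = 1, gives S = U.A = (2, 0)^T;
   S[0][0] is, up to sign, the gcd of the entries of A.  */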
2435
2436 static void
2437 lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
2438 lambda_matrix S, lambda_matrix U)
2439 {
2440 int i, j, i0 = 0;
2441
2442 lambda_matrix_copy (A, S, m, n);
2443 lambda_matrix_id (U, m);
2444
2445 for (j = 0; j < n; j++)
2446 {
2447 if (lambda_vector_first_nz (S[j], m, i0) < m)
2448 {
2449 ++i0;
2450 for (i = m - 1; i >= i0; i--)
2451 {
2452 while (S[i][j] != 0)
2453 {
2454 int sigma, factor, a, b;
2455
2456 a = S[i-1][j];
2457 b = S[i][j];
2458 sigma = (a * b < 0) ? -1: 1;
2459 a = abs (a);
2460 b = abs (b);
2461 factor = sigma * (a / b);
2462
2463 lambda_matrix_row_add (S, n, i, i-1, -factor);
2464 std::swap (S[i], S[i-1]);
2465
2466 lambda_matrix_row_add (U, m, i, i-1, -factor);
2467 std::swap (U[i], U[i-1]);
2468 }
2469 }
2470 }
2471 }
2472 }
2473
2474 /* Determines the overlapping elements due to accesses CHREC_A and
2475 CHREC_B, which are affine functions. This function cannot handle
2476 symbolic evolution functions, i.e. when the initial conditions are
2477 parameters, because it uses lambda matrices of integers. */
2478
2479 static void
2480 analyze_subscript_affine_affine (tree chrec_a,
2481 tree chrec_b,
2482 conflict_function **overlaps_a,
2483 conflict_function **overlaps_b,
2484 tree *last_conflicts)
2485 {
2486 unsigned nb_vars_a, nb_vars_b, dim;
2487 HOST_WIDE_INT init_a, init_b, gamma, gcd_alpha_beta;
2488 lambda_matrix A, U, S;
2489 struct obstack scratch_obstack;
2490
2491 if (eq_evolutions_p (chrec_a, chrec_b))
2492 {
2493 /* The accessed index overlaps for each iteration in the
2494 loop. */
2495 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
2496 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
2497 *last_conflicts = chrec_dont_know;
2498 return;
2499 }
2500 if (dump_file && (dump_flags & TDF_DETAILS))
2501 fprintf (dump_file, "(analyze_subscript_affine_affine \n");
2502
2503 /* For determining the initial intersection, we have to solve a
2504 Diophantine equation. This is the most time-consuming part.
2505
2506 To answer the question "Is there a dependence?" we have
2507 to prove that there exists a solution to the Diophantine
2508 equation, and that the solution is in the iteration domain,
2509 i.e. the solution is positive or zero, and that the solution
2510 happens before the upper bound loop.nb_iterations. Otherwise
2511 there is no dependence. This function outputs a description of
2512 the iterations that hold the intersections. */
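/* For two affine univariate chrecs, the equation to solve is

   | CHREC_RIGHT (chrec_a) * x - CHREC_RIGHT (chrec_b) * y = gamma

   with gamma = init_b - init_a: the matrix A built below stores the
   steps of chrec_a followed by the negated steps of chrec_b, and
   gamma is the difference of the initial conditions.  */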
2513
2514 nb_vars_a = nb_vars_in_chrec (chrec_a);
2515 nb_vars_b = nb_vars_in_chrec (chrec_b);
2516
2517 gcc_obstack_init (&scratch_obstack);
2518
2519 dim = nb_vars_a + nb_vars_b;
2520 U = lambda_matrix_new (dim, dim, &scratch_obstack);
2521 A = lambda_matrix_new (dim, 1, &scratch_obstack);
2522 S = lambda_matrix_new (dim, 1, &scratch_obstack);
2523
2524 init_a = int_cst_value (initialize_matrix_A (A, chrec_a, 0, 1));
2525 init_b = int_cst_value (initialize_matrix_A (A, chrec_b, nb_vars_a, -1));
2526 gamma = init_b - init_a;
2527
2528 /* Don't do all the hard work of solving the Diophantine equation
2529 when we already know the solution: for example,
2530 | {3, +, 1}_1
2531 | {3, +, 4}_2
2532 | gamma = 3 - 3 = 0.
2533 Then the first overlap occurs during the first iterations:
2534 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
2535 */
2536 if (gamma == 0)
2537 {
2538 if (nb_vars_a == 1 && nb_vars_b == 1)
2539 {
2540 HOST_WIDE_INT step_a, step_b;
2541 HOST_WIDE_INT niter, niter_a, niter_b;
2542 affine_fn ova, ovb;
2543
2544 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
2545 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
2546 niter = MIN (niter_a, niter_b);
2547 step_a = int_cst_value (CHREC_RIGHT (chrec_a));
2548 step_b = int_cst_value (CHREC_RIGHT (chrec_b));
2549
2550 compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
2551 &ova, &ovb,
2552 last_conflicts, 1);
2553 *overlaps_a = conflict_fn (1, ova);
2554 *overlaps_b = conflict_fn (1, ovb);
2555 }
2556
2557 else if (nb_vars_a == 2 && nb_vars_b == 1)
2558 compute_overlap_steps_for_affine_1_2
2559 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);
2560
2561 else if (nb_vars_a == 1 && nb_vars_b == 2)
2562 compute_overlap_steps_for_affine_1_2
2563 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);
2564
2565 else
2566 {
2567 if (dump_file && (dump_flags & TDF_DETAILS))
2568 fprintf (dump_file, "affine-affine test failed: too many variables.\n");
2569 *overlaps_a = conflict_fn_not_known ();
2570 *overlaps_b = conflict_fn_not_known ();
2571 *last_conflicts = chrec_dont_know;
2572 }
2573 goto end_analyze_subs_aa;
2574 }
2575
2576 /* U.A = S */
2577 lambda_matrix_right_hermite (A, dim, 1, S, U);
2578
2579 if (S[0][0] < 0)
2580 {
2581 S[0][0] *= -1;
2582 lambda_matrix_row_negate (U, dim, 0);
2583 }
2584 gcd_alpha_beta = S[0][0];
2585
2586 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
2587 but that is quite a strange case. Instead of ICEing, answer
2588 "don't know". */
2589 if (gcd_alpha_beta == 0)
2590 {
2591 *overlaps_a = conflict_fn_not_known ();
2592 *overlaps_b = conflict_fn_not_known ();
2593 *last_conflicts = chrec_dont_know;
2594 goto end_analyze_subs_aa;
2595 }
2596
2597 /* The classic "gcd-test". */
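/* For example: for {1, +, 4}_1 vs. {4, +, 6}_1 we have gamma = 3 and
   gcd_alpha_beta = gcd (4, 6) = 2; as 2 does not divide 3, the
   equation 4*x - 6*y = 3 has no integer solution, hence no
   dependence.  */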
2598 if (!int_divides_p (gcd_alpha_beta, gamma))
2599 {
2600 /* The "gcd-test" has determined that there is no integer
2601 solution, i.e. there is no dependence. */
2602 *overlaps_a = conflict_fn_no_dependence ();
2603 *overlaps_b = conflict_fn_no_dependence ();
2604 *last_conflicts = integer_zero_node;
2605 }
2606
2607 /* Both access functions are univariate. This includes SIV and MIV cases. */
2608 else if (nb_vars_a == 1 && nb_vars_b == 1)
2609 {
2610 /* Both functions should have the same evolution sign. */
2611 if (((A[0][0] > 0 && -A[1][0] > 0)
2612 || (A[0][0] < 0 && -A[1][0] < 0)))
2613 {
2614 /* The solutions are given by:
2615 |
2616 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0]
2617 | [u21 u22] [y0]
2618
2619 For a given integer t. Using the following variables,
2620
2621 | i0 = u11 * gamma / gcd_alpha_beta
2622 | j0 = u12 * gamma / gcd_alpha_beta
2623 | i1 = u21
2624 | j1 = u22
2625
2626 the solutions are:
2627
2628 | x0 = i0 + i1 * t,
2629 | y0 = j0 + j1 * t. */
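/* A worked instance, for {1, +, 4}_1 vs. {3, +, 6}_1 (gamma = 2,
   gcd_alpha_beta = 2), assuming the decomposition returned
   U = ((2, 1), (3, 2)): i0 = 2, j0 = 1, i1 = 3, j1 = 2, so
   x0 = 2 + 3*t and y0 = 1 + 2*t; indeed
   1 + 4 * (2 + 3*t) = 3 + 6 * (1 + 2*t) = 9 + 12*t.  */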
2630 HOST_WIDE_INT i0, j0, i1, j1;
2631
2632 i0 = U[0][0] * gamma / gcd_alpha_beta;
2633 j0 = U[0][1] * gamma / gcd_alpha_beta;
2634 i1 = U[1][0];
2635 j1 = U[1][1];
2636
2637 if ((i1 == 0 && i0 < 0)
2638 || (j1 == 0 && j0 < 0))
2639 {
2640 /* There is no solution.
2641 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
2642 falls in here, but for the moment we don't look at the
2643 upper bound of the iteration domain. */
2644 *overlaps_a = conflict_fn_no_dependence ();
2645 *overlaps_b = conflict_fn_no_dependence ();
2646 *last_conflicts = integer_zero_node;
2647 goto end_analyze_subs_aa;
2648 }
2649
2650 if (i1 > 0 && j1 > 0)
2651 {
2652 HOST_WIDE_INT niter_a
2653 = max_stmt_executions_int (get_chrec_loop (chrec_a));
2654 HOST_WIDE_INT niter_b
2655 = max_stmt_executions_int (get_chrec_loop (chrec_b));
2656 HOST_WIDE_INT niter = MIN (niter_a, niter_b);
2657
2658 /* (X0, Y0) is a solution of the Diophantine equation:
2659 "chrec_a (X0) = chrec_b (Y0)". */
2660 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
2661 CEIL (-j0, j1));
2662 HOST_WIDE_INT x0 = i1 * tau1 + i0;
2663 HOST_WIDE_INT y0 = j1 * tau1 + j0;
2664
2665 /* (X1, Y1) is the smallest positive solution of the eq
2666 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
2667 first conflict occurs. */
2668 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
2669 HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
2670 HOST_WIDE_INT y1 = y0 - j1 * min_multiple;
2671
2672 if (niter > 0)
2673 {
2674 HOST_WIDE_INT tau2 = MIN (FLOOR_DIV (niter - i0, i1),
2675 FLOOR_DIV (niter - j0, j1));
2676 HOST_WIDE_INT last_conflict = tau2 - (x1 - i0)/i1;
2677
2678 /* If the overlap occurs outside of the bounds of the
2679 loop, there is no dependence. */
2680 if (x1 >= niter || y1 >= niter)
2681 {
2682 *overlaps_a = conflict_fn_no_dependence ();
2683 *overlaps_b = conflict_fn_no_dependence ();
2684 *last_conflicts = integer_zero_node;
2685 goto end_analyze_subs_aa;
2686 }
2687 else
2688 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
2689 }
2690 else
2691 *last_conflicts = chrec_dont_know;
2692
2693 *overlaps_a
2694 = conflict_fn (1,
2695 affine_fn_univar (build_int_cst (NULL_TREE, x1),
2696 1,
2697 build_int_cst (NULL_TREE, i1)));
2698 *overlaps_b
2699 = conflict_fn (1,
2700 affine_fn_univar (build_int_cst (NULL_TREE, y1),
2701 1,
2702 build_int_cst (NULL_TREE, j1)));
2703 }
2704 else
2705 {
2706 /* FIXME: For the moment, the upper bound of the
2707 iteration domain for i and j is not checked. */
2708 if (dump_file && (dump_flags & TDF_DETAILS))
2709 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
2710 *overlaps_a = conflict_fn_not_known ();
2711 *overlaps_b = conflict_fn_not_known ();
2712 *last_conflicts = chrec_dont_know;
2713 }
2714 }
2715 else
2716 {
2717 if (dump_file && (dump_flags & TDF_DETAILS))
2718 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
2719 *overlaps_a = conflict_fn_not_known ();
2720 *overlaps_b = conflict_fn_not_known ();
2721 *last_conflicts = chrec_dont_know;
2722 }
2723 }
2724 else
2725 {
2726 if (dump_file && (dump_flags & TDF_DETAILS))
2727 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
2728 *overlaps_a = conflict_fn_not_known ();
2729 *overlaps_b = conflict_fn_not_known ();
2730 *last_conflicts = chrec_dont_know;
2731 }
2732
2733 end_analyze_subs_aa:
2734 obstack_free (&scratch_obstack, NULL);
2735 if (dump_file && (dump_flags & TDF_DETAILS))
2736 {
2737 fprintf (dump_file, " (overlaps_a = ");
2738 dump_conflict_function (dump_file, *overlaps_a);
2739 fprintf (dump_file, ")\n (overlaps_b = ");
2740 dump_conflict_function (dump_file, *overlaps_b);
2741 fprintf (dump_file, "))\n");
2742 }
2743 }
2744
2745 /* Returns true when analyze_subscript_affine_affine can be used for
2746 determining the dependence relation between chrec_a and chrec_b,
2747 which contain symbols. This function modifies chrec_a and chrec_b
2748 such that the analysis result is the same, and such that they don't
2749 contain symbols, and then can safely be passed to the analyzer.
2750
2751 Example: The analysis of the following tuples of evolutions produces
2752 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
2753 vs. {0, +, 1}_1
2754
2755 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
2756 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
2757 */
2758
2759 static bool
2760 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
2761 {
2762 tree diff, type, left_a, left_b, right_b;
2763
2764 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
2765 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
2766 /* FIXME: For the moment not handled. Might be refined later. */
2767 return false;
2768
2769 type = chrec_type (*chrec_a);
2770 left_a = CHREC_LEFT (*chrec_a);
2771 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
2772 diff = chrec_fold_minus (type, left_a, left_b);
2773
2774 if (!evolution_function_is_constant_p (diff))
2775 return false;
2776
2777 if (dump_file && (dump_flags & TDF_DETAILS))
2778 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");
2779
2780 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
2781 diff, CHREC_RIGHT (*chrec_a));
2782 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
2783 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
2784 build_int_cst (type, 0),
2785 right_b);
2786 return true;
2787 }
2788
2789 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and
2790 *OVERLAPS_B are initialized to the functions that describe the
2791 relation between the elements accessed twice by CHREC_A and
2792 CHREC_B. For k >= 0, the following property is verified:
2793
2794 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
2795
2796 static void
2797 analyze_siv_subscript (tree chrec_a,
2798 tree chrec_b,
2799 conflict_function **overlaps_a,
2800 conflict_function **overlaps_b,
2801 tree *last_conflicts,
2802 int loop_nest_num)
2803 {
2804 dependence_stats.num_siv++;
2805
2806 if (dump_file && (dump_flags & TDF_DETAILS))
2807 fprintf (dump_file, "(analyze_siv_subscript \n");
2808
2809 if (evolution_function_is_constant_p (chrec_a)
2810 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
2811 analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
2812 overlaps_a, overlaps_b, last_conflicts);
2813
2814 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
2815 && evolution_function_is_constant_p (chrec_b))
2816 analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
2817 overlaps_b, overlaps_a, last_conflicts);
2818
2819 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
2820 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
2821 {
2822 if (!chrec_contains_symbols (chrec_a)
2823 && !chrec_contains_symbols (chrec_b))
2824 {
2825 analyze_subscript_affine_affine (chrec_a, chrec_b,
2826 overlaps_a, overlaps_b,
2827 last_conflicts);
2828
2829 if (CF_NOT_KNOWN_P (*overlaps_a)
2830 || CF_NOT_KNOWN_P (*overlaps_b))
2831 dependence_stats.num_siv_unimplemented++;
2832 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
2833 || CF_NO_DEPENDENCE_P (*overlaps_b))
2834 dependence_stats.num_siv_independent++;
2835 else
2836 dependence_stats.num_siv_dependent++;
2837 }
2838 else if (can_use_analyze_subscript_affine_affine (&chrec_a,
2839 &chrec_b))
2840 {
2841 analyze_subscript_affine_affine (chrec_a, chrec_b,
2842 overlaps_a, overlaps_b,
2843 last_conflicts);
2844
2845 if (CF_NOT_KNOWN_P (*overlaps_a)
2846 || CF_NOT_KNOWN_P (*overlaps_b))
2847 dependence_stats.num_siv_unimplemented++;
2848 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
2849 || CF_NO_DEPENDENCE_P (*overlaps_b))
2850 dependence_stats.num_siv_independent++;
2851 else
2852 dependence_stats.num_siv_dependent++;
2853 }
2854 else
2855 goto siv_subscript_dontknow;
2856 }
2857
2858 else
2859 {
2860 siv_subscript_dontknow:;
2861 if (dump_file && (dump_flags & TDF_DETAILS))
2862 fprintf (dump_file, " siv test failed: unimplemented");
2863 *overlaps_a = conflict_fn_not_known ();
2864 *overlaps_b = conflict_fn_not_known ();
2865 *last_conflicts = chrec_dont_know;
2866 dependence_stats.num_siv_unimplemented++;
2867 }
2868
2869 if (dump_file && (dump_flags & TDF_DETAILS))
2870 fprintf (dump_file, ")\n");
2871 }
2872
2873 /* Returns false if we can prove that the greatest common divisor of the steps
2874 of CHREC does not divide CST, true otherwise. */
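/* For example, for CHREC = {{x, +, 4}_1, +, 6}_2 the gcd of the steps
   is 2; with CST = 3 the function returns false (2 does not divide 3),
   while with CST = 8 it returns true.  */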
2875
2876 static bool
2877 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
2878 {
2879 HOST_WIDE_INT cd = 0, val;
2880 tree step;
2881
2882 if (!tree_fits_shwi_p (cst))
2883 return true;
2884 val = tree_to_shwi (cst);
2885
2886 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
2887 {
2888 step = CHREC_RIGHT (chrec);
2889 if (!tree_fits_shwi_p (step))
2890 return true;
2891 cd = gcd (cd, tree_to_shwi (step));
2892 chrec = CHREC_LEFT (chrec);
2893 }
2894
2895 return val % cd == 0;
2896 }
2897
2898 /* Analyze a MIV (Multiple Index Variable) subscript with respect to
2899 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the
2900 functions that describe the relation between the elements accessed
2901 twice by CHREC_A and CHREC_B. For k >= 0, the following property
2902 is verified:
2903
2904 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
2905
2906 static void
2907 analyze_miv_subscript (tree chrec_a,
2908 tree chrec_b,
2909 conflict_function **overlaps_a,
2910 conflict_function **overlaps_b,
2911 tree *last_conflicts,
2912 struct loop *loop_nest)
2913 {
2914 tree type, difference;
2915
2916 dependence_stats.num_miv++;
2917 if (dump_file && (dump_flags & TDF_DETAILS))
2918 fprintf (dump_file, "(analyze_miv_subscript \n");
2919
2920 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
2921 chrec_a = chrec_convert (type, chrec_a, NULL);
2922 chrec_b = chrec_convert (type, chrec_b, NULL);
2923 difference = chrec_fold_minus (type, chrec_a, chrec_b);
2924
2925 if (eq_evolutions_p (chrec_a, chrec_b))
2926 {
2927 /* Access functions are the same: all the elements are accessed
2928 in the same order. */
2929 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
2930 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
2931 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
2932 dependence_stats.num_miv_dependent++;
2933 }
2934
2935 else if (evolution_function_is_constant_p (difference)
2936 /* For the moment, the following is verified:
2937 evolution_function_is_affine_multivariate_p (chrec_a,
2938 loop_nest->num) */
2939 && !gcd_of_steps_may_divide_p (chrec_a, difference))
2940 {
2941 /* testsuite/.../ssa-chrec-33.c
2942 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
2943
2944 The difference is 1, and all the evolution steps are multiples
2945 of 2, consequently there are no overlapping elements. */
2946 *overlaps_a = conflict_fn_no_dependence ();
2947 *overlaps_b = conflict_fn_no_dependence ();
2948 *last_conflicts = integer_zero_node;
2949 dependence_stats.num_miv_independent++;
2950 }
2951
2952 else if (evolution_function_is_affine_multivariate_p (chrec_a, loop_nest->num)
2953 && !chrec_contains_symbols (chrec_a)
2954 && evolution_function_is_affine_multivariate_p (chrec_b, loop_nest->num)
2955 && !chrec_contains_symbols (chrec_b))
2956 {
2957 /* testsuite/.../ssa-chrec-35.c
2958 {0, +, 1}_2 vs. {0, +, 1}_3
2959 the overlapping elements are respectively located at iterations:
2960 {0, +, 1}_x and {0, +, 1}_x,
2961 in other words, we have the equality:
2962 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)
2963
2964 Other examples:
2965 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
2966 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)
2967
2968 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
2969 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
2970 */
2971 analyze_subscript_affine_affine (chrec_a, chrec_b,
2972 overlaps_a, overlaps_b, last_conflicts);
2973
2974 if (CF_NOT_KNOWN_P (*overlaps_a)
2975 || CF_NOT_KNOWN_P (*overlaps_b))
2976 dependence_stats.num_miv_unimplemented++;
2977 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
2978 || CF_NO_DEPENDENCE_P (*overlaps_b))
2979 dependence_stats.num_miv_independent++;
2980 else
2981 dependence_stats.num_miv_dependent++;
2982 }
2983
2984 else
2985 {
2986 /* When the analysis is too difficult, answer "don't know". */
2987 if (dump_file && (dump_flags & TDF_DETAILS))
2988 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");
2989
2990 *overlaps_a = conflict_fn_not_known ();
2991 *overlaps_b = conflict_fn_not_known ();
2992 *last_conflicts = chrec_dont_know;
2993 dependence_stats.num_miv_unimplemented++;
2994 }
2995
2996 if (dump_file && (dump_flags & TDF_DETAILS))
2997 fprintf (dump_file, ")\n");
2998 }
2999
3000 /* Determines the iterations for which CHREC_A is equal to CHREC_B
3001 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and
3002 OVERLAP_ITERATIONS_B are initialized with two functions that
3003 describe the iterations that contain conflicting elements.
3004
3005 Remark: For an integer k >= 0, the following equality is true:
3006
3007 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
3008 */
3009
3010 static void
3011 analyze_overlapping_iterations (tree chrec_a,
3012 tree chrec_b,
3013 conflict_function **overlap_iterations_a,
3014 conflict_function **overlap_iterations_b,
3015 tree *last_conflicts, struct loop *loop_nest)
3016 {
3017 unsigned int lnn = loop_nest->num;
3018
3019 dependence_stats.num_subscript_tests++;
3020
3021 if (dump_file && (dump_flags & TDF_DETAILS))
3022 {
3023 fprintf (dump_file, "(analyze_overlapping_iterations \n");
3024 fprintf (dump_file, " (chrec_a = ");
3025 print_generic_expr (dump_file, chrec_a, 0);
3026 fprintf (dump_file, ")\n (chrec_b = ");
3027 print_generic_expr (dump_file, chrec_b, 0);
3028 fprintf (dump_file, ")\n");
3029 }
3030
3031 if (chrec_a == NULL_TREE
3032 || chrec_b == NULL_TREE
3033 || chrec_contains_undetermined (chrec_a)
3034 || chrec_contains_undetermined (chrec_b))
3035 {
3036 dependence_stats.num_subscript_undetermined++;
3037
3038 *overlap_iterations_a = conflict_fn_not_known ();
3039 *overlap_iterations_b = conflict_fn_not_known ();
3040 }
3041
3042 /* If they are the same chrec, and are affine, they overlap
3043 on every iteration. */
3044 else if (eq_evolutions_p (chrec_a, chrec_b)
3045 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
3046 || operand_equal_p (chrec_a, chrec_b, 0)))
3047 {
3048 dependence_stats.num_same_subscript_function++;
3049 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3050 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3051 *last_conflicts = chrec_dont_know;
3052 }
3053
3054 /* If they aren't the same, and aren't affine, we can't do anything
3055 yet. */
3056 else if ((chrec_contains_symbols (chrec_a)
3057 || chrec_contains_symbols (chrec_b))
3058 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
3059 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
3060 {
3061 dependence_stats.num_subscript_undetermined++;
3062 *overlap_iterations_a = conflict_fn_not_known ();
3063 *overlap_iterations_b = conflict_fn_not_known ();
3064 }
3065
3066 else if (ziv_subscript_p (chrec_a, chrec_b))
3067 analyze_ziv_subscript (chrec_a, chrec_b,
3068 overlap_iterations_a, overlap_iterations_b,
3069 last_conflicts);
3070
3071 else if (siv_subscript_p (chrec_a, chrec_b))
3072 analyze_siv_subscript (chrec_a, chrec_b,
3073 overlap_iterations_a, overlap_iterations_b,
3074 last_conflicts, lnn);
3075
3076 else
3077 analyze_miv_subscript (chrec_a, chrec_b,
3078 overlap_iterations_a, overlap_iterations_b,
3079 last_conflicts, loop_nest);
3080
3081 if (dump_file && (dump_flags & TDF_DETAILS))
3082 {
3083 fprintf (dump_file, " (overlap_iterations_a = ");
3084 dump_conflict_function (dump_file, *overlap_iterations_a);
3085 fprintf (dump_file, ")\n (overlap_iterations_b = ");
3086 dump_conflict_function (dump_file, *overlap_iterations_b);
3087 fprintf (dump_file, "))\n");
3088 }
3089 }
3090
3091 /* Helper function for uniquely inserting distance vectors. */
3092
3093 static void
3094 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
3095 {
3096 unsigned i;
3097 lambda_vector v;
3098
3099 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
3100 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
3101 return;
3102
3103 DDR_DIST_VECTS (ddr).safe_push (dist_v);
3104 }
3105
3106 /* Helper function for uniquely inserting direction vectors. */
3107
3108 static void
3109 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
3110 {
3111 unsigned i;
3112 lambda_vector v;
3113
3114 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
3115 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
3116 return;
3117
3118 DDR_DIR_VECTS (ddr).safe_push (dir_v);
3119 }
3120
3121 /* Add a distance of 1 on all the loops outer than INDEX. If we
3122 haven't yet determined a distance for this outer loop, push a new
3123 distance vector composed of the previous distance, and a distance
3124 of 1 for this outer loop. Example:
3125
3126 | loop_1
3127 | loop_2
3128 | A[10]
3129 | endloop_2
3130 | endloop_1
3131
3132 Saved vectors are of the form (dist_in_1, dist_in_2). First, we
3133 save (0, 1), then we have to save (1, 0). */
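/* For instance, a call with DIST_V = (0, 0) and INDEX = 2 pushes
   (0, 1) and then (1, 0), matching the example above.  */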
3134
3135 static void
3136 add_outer_distances (struct data_dependence_relation *ddr,
3137 lambda_vector dist_v, int index)
3138 {
3139 /* For each outer loop where init_v is not set, the accesses are
3140 in dependence of distance 1 in the loop. */
3141 while (--index >= 0)
3142 {
3143 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3144 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
3145 save_v[index] = 1;
3146 save_dist_v (ddr, save_v);
3147 }
3148 }
3149
3150 /* Return false when we fail to represent the data dependence as a
3151 distance vector. INIT_B is set to true when a component has been
3152 added to the distance vector DIST_V. INDEX_CARRY is then set to
3153 the index in DIST_V that carries the dependence. */
3154
3155 static bool
3156 build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
3157 struct data_reference *ddr_a,
3158 struct data_reference *ddr_b,
3159 lambda_vector dist_v, bool *init_b,
3160 int *index_carry)
3161 {
3162 unsigned i;
3163 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3164
3165 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3166 {
3167 tree access_fn_a, access_fn_b;
3168 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);
3169
3170 if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
3171 {
3172 non_affine_dependence_relation (ddr);
3173 return false;
3174 }
3175
3176 access_fn_a = DR_ACCESS_FN (ddr_a, i);
3177 access_fn_b = DR_ACCESS_FN (ddr_b, i);
3178
3179 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
3180 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
3181 {
3182 int dist, index;
3183 int var_a = CHREC_VARIABLE (access_fn_a);
3184 int var_b = CHREC_VARIABLE (access_fn_b);
3185
3186 if (var_a != var_b
3187 || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
3188 {
3189 non_affine_dependence_relation (ddr);
3190 return false;
3191 }
3192
3193 dist = int_cst_value (SUB_DISTANCE (subscript));
3194 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
3195 *index_carry = MIN (index, *index_carry);
3196
3197 /* This is the subscript coupling test. If we have already
3198 recorded a distance for this loop (a distance coming from
3199 another subscript), it should be the same. For example,
3200 in the following code, there is no dependence:
3201
3202 | loop i = 0, N, 1
3203 | T[i+1][i] = ...
3204 | ... = T[i][i]
3205 | endloop
3206 */
3207 if (init_v[index] != 0 && dist_v[index] != dist)
3208 {
3209 finalize_ddr_dependent (ddr, chrec_known);
3210 return false;
3211 }
3212
3213 dist_v[index] = dist;
3214 init_v[index] = 1;
3215 *init_b = true;
3216 }
3217 else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
3218 {
3219 /* This can be for example an affine vs. constant dependence
3220 (T[i] vs. T[3]) that is not an affine dependence and is
3221 not representable as a distance vector. */
3222 non_affine_dependence_relation (ddr);
3223 return false;
3224 }
3225 }
3226
3227 return true;
3228 }
3229
3230 /* Return true when the DDR contains only constant access functions. */
3231
3232 static bool
3233 constant_access_functions (const struct data_dependence_relation *ddr)
3234 {
3235 unsigned i;
3236
3237 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3238 if (!evolution_function_is_constant_p (DR_ACCESS_FN (DDR_A (ddr), i))
3239 || !evolution_function_is_constant_p (DR_ACCESS_FN (DDR_B (ddr), i)))
3240 return false;
3241
3242 return true;
3243 }
3244
3245 /* Helper function for the case where DDR_A and DDR_B are the same
3246 multivariate access function with a constant step. For an example
3247 see pr34635-1.c. */
3248
3249 static void
3250 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
3251 {
3252 int x_1, x_2;
3253 tree c_1 = CHREC_LEFT (c_2);
3254 tree c_0 = CHREC_LEFT (c_1);
3255 lambda_vector dist_v;
3256 int v1, v2, cd;
3257
3258 /* Polynomials with more than 2 variables are not handled yet. When
3259 the evolution steps are parameters, it is not possible to
3260 represent the dependence using classical distance vectors. */
3261 if (TREE_CODE (c_0) != INTEGER_CST
3262 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
3263 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
3264 {
3265 DDR_AFFINE_P (ddr) = false;
3266 return;
3267 }
3268
3269 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
3270 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));
3271
3272 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */
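/* Indeed, "{{0, +, 2}_1, +, 3}_2" evaluated at (i + 3, j - 2) gives
   2 * (i + 3) + 3 * (j - 2) = 2*i + 3*j, the same value as at (i, j).  */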
3273 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3274 v1 = int_cst_value (CHREC_RIGHT (c_1));
3275 v2 = int_cst_value (CHREC_RIGHT (c_2));
3276 cd = gcd (v1, v2);
3277 v1 /= cd;
3278 v2 /= cd;
3279
3280 if (v2 < 0)
3281 {
3282 v2 = -v2;
3283 v1 = -v1;
3284 }
3285
3286 dist_v[x_1] = v2;
3287 dist_v[x_2] = -v1;
3288 save_dist_v (ddr, dist_v);
3289
3290 add_outer_distances (ddr, dist_v, x_1);
3291 }
3292
3293 /* Helper function for the case where DDR_A and DDR_B are the same
3294 access functions. */
3295
3296 static void
3297 add_other_self_distances (struct data_dependence_relation *ddr)
3298 {
3299 lambda_vector dist_v;
3300 unsigned i;
3301 int index_carry = DDR_NB_LOOPS (ddr);
3302
3303 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3304 {
3305 tree access_fun = DR_ACCESS_FN (DDR_A (ddr), i);
3306
3307 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
3308 {
3309 if (!evolution_function_is_univariate_p (access_fun))
3310 {
3311 if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
3312 {
3313 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
3314 return;
3315 }
3316
3317 access_fun = DR_ACCESS_FN (DDR_A (ddr), 0);
3318
3319 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
3320 add_multivariate_self_dist (ddr, access_fun);
3321 else
3322 /* The evolution step is not constant: it varies in
3323 the outer loop, so this cannot be represented by a
3324 distance vector. For example in pr34635.c the
3325 evolution is {0, +, {0, +, 4}_1}_2. */
3326 DDR_AFFINE_P (ddr) = false;
3327
3328 return;
3329 }
3330
3331 index_carry = MIN (index_carry,
3332 index_in_loop_nest (CHREC_VARIABLE (access_fun),
3333 DDR_LOOP_NEST (ddr)));
3334 }
3335 }
3336
3337 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3338 add_outer_distances (ddr, dist_v, index_carry);
3339 }
3340
3341 static void
3342 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
3343 {
3344 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3345
3346 dist_v[DDR_INNER_LOOP (ddr)] = 1;
3347 save_dist_v (ddr, dist_v);
3348 }
3349
3350 /* Adds a unit distance vector to DDR when there is a 0 overlap. This
3351 is the case for example when access functions are the same and
3352 equal to a constant, as in:
3353
3354 | loop_1
3355 | A[3] = ...
3356 | ... = A[3]
3357 | endloop_1
3358
3359 in which case the distance vectors are (0) and (1). */
3360
3361 static void
3362 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
3363 {
3364 unsigned i, j;
3365
3366 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3367 {
3368 subscript_p sub = DDR_SUBSCRIPT (ddr, i);
3369 conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
3370 conflict_function *cb = SUB_CONFLICTS_IN_B (sub);
3371
3372 for (j = 0; j < ca->n; j++)
3373 if (affine_function_zero_p (ca->fns[j]))
3374 {
3375 insert_innermost_unit_dist_vector (ddr);
3376 return;
3377 }
3378
3379 for (j = 0; j < cb->n; j++)
3380 if (affine_function_zero_p (cb->fns[j]))
3381 {
3382 insert_innermost_unit_dist_vector (ddr);
3383 return;
3384 }
3385 }
3386 }
3387
3388 /* Compute the classic per loop distance vector. DDR is the data
3389 dependence relation to build a vector from. Return false when we fail
3390 to represent the data dependence as a distance vector. */
3391
3392 static bool
3393 build_classic_dist_vector (struct data_dependence_relation *ddr,
3394 struct loop *loop_nest)
3395 {
3396 bool init_b = false;
3397 int index_carry = DDR_NB_LOOPS (ddr);
3398 lambda_vector dist_v;
3399
3400 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
3401 return false;
3402
3403 if (same_access_functions (ddr))
3404 {
3405 /* Save the 0 vector. */
3406 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3407 save_dist_v (ddr, dist_v);
3408
3409 if (constant_access_functions (ddr))
3410 add_distance_for_zero_overlaps (ddr);
3411
3412 if (DDR_NB_LOOPS (ddr) > 1)
3413 add_other_self_distances (ddr);
3414
3415 return true;
3416 }
3417
3418 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3419 if (!build_classic_dist_vector_1 (ddr, DDR_A (ddr), DDR_B (ddr),
3420 dist_v, &init_b, &index_carry))
3421 return false;
3422
3423 /* Save the distance vector if we initialized one. */
3424 if (init_b)
3425 {
3426 /* Verify a basic constraint: classic distance vectors should
3427 always be lexicographically positive.
3428
3429 Data references are collected in the order of execution of
3430 the program, thus for the following loop
3431
3432 | for (i = 1; i < 100; i++)
3433 | for (j = 1; j < 100; j++)
3434 | {
3435 | t = T[j+1][i-1]; // A
3436 | T[j][i] = t + 2; // B
3437 | }
3438
3439 references are collected following the direction of the wind:
3440 A then B. The data dependence tests are performed also
3441 following this order, such that we're looking at the distance
3442 separating the elements accessed by A from the elements later
3443 accessed by B. But in this example, the distance returned by
3444 test_dep (A, B) is lexicographically negative (-1, 1), which
3445 means that the access A occurs later than B with respect to
3446 the outer loop, i.e. we're actually looking upwind. In this
3447 case we solve test_dep (B, A) looking downwind to the
3448 lexicographically positive solution, that returns the
3449 distance vector (1, -1). */
3450 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
3451 {
3452 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3453 if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
3454 loop_nest))
3455 return false;
3456 compute_subscript_distance (ddr);
3457 if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
3458 save_v, &init_b, &index_carry))
3459 return false;
3460 save_dist_v (ddr, save_v);
3461 DDR_REVERSED_P (ddr) = true;
3462
3463 /* In this case there is a dependence forward for all the
3464 outer loops:
3465
3466 | for (k = 1; k < 100; k++)
3467 | for (i = 1; i < 100; i++)
3468 | for (j = 1; j < 100; j++)
3469 | {
3470 | t = T[j+1][i-1]; // A
3471 | T[j][i] = t + 2; // B
3472 | }
3473
3474 the vectors are:
3475 (0, 1, -1)
3476 (1, 1, -1)
3477 (1, -1, 1)
3478 */
3479 if (DDR_NB_LOOPS (ddr) > 1)
3480 {
3481 add_outer_distances (ddr, save_v, index_carry);
3482 add_outer_distances (ddr, dist_v, index_carry);
3483 }
3484 }
3485 else
3486 {
3487 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3488 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
3489
3490 if (DDR_NB_LOOPS (ddr) > 1)
3491 {
3492 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3493
3494 if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr),
3495 DDR_A (ddr), loop_nest))
3496 return false;
3497 compute_subscript_distance (ddr);
3498 if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
3499 opposite_v, &init_b,
3500 &index_carry))
3501 return false;
3502
3503 save_dist_v (ddr, save_v);
3504 add_outer_distances (ddr, dist_v, index_carry);
3505 add_outer_distances (ddr, opposite_v, index_carry);
3506 }
3507 else
3508 save_dist_v (ddr, save_v);
3509 }
3510 }
3511 else
3512 {
3513 /* There is a distance of 1 on all the outer loops: Example:
3514 there is a dependence of distance 1 on loop_1 for the array A.
3515
3516 | loop_1
3517 | A[5] = ...
3518 | endloop
3519 */
3520 add_outer_distances (ddr, dist_v,
3521 lambda_vector_first_nz (dist_v,
3522 DDR_NB_LOOPS (ddr), 0));
3523 }
3524
3525 if (dump_file && (dump_flags & TDF_DETAILS))
3526 {
3527 unsigned i;
3528
3529 fprintf (dump_file, "(build_classic_dist_vector\n");
3530 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
3531 {
3532 fprintf (dump_file, " dist_vector = (");
3533 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
3534 DDR_NB_LOOPS (ddr));
3535 fprintf (dump_file, " )\n");
3536 }
3537 fprintf (dump_file, ")\n");
3538 }
3539
3540 return true;
3541 }
3542
3543 /* Return the direction for a given distance.
3544 FIXME: Computing dir this way is suboptimal, since dir can catch
3545 cases that dist is unable to represent. */
3546
3547 static inline enum data_dependence_direction
3548 dir_from_dist (int dist)
3549 {
3550 if (dist > 0)
3551 return dir_positive;
3552 else if (dist < 0)
3553 return dir_negative;
3554 else
3555 return dir_equal;
3556 }
3557
3558 /* Compute the classic per loop direction vector. DDR is the data
3559 dependence relation to build a vector from. */
3560
3561 static void
3562 build_classic_dir_vector (struct data_dependence_relation *ddr)
3563 {
3564 unsigned i, j;
3565 lambda_vector dist_v;
3566
3567 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
3568 {
3569 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3570
3571 for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
3572 dir_v[j] = dir_from_dist (dist_v[j]);
3573
3574 save_dir_v (ddr, dir_v);
3575 }
3576 }
3577
3578 /* Helper function. Returns true when there is a dependence between
3579 data references DRA and DRB. */
3580
3581 static bool
3582 subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
3583 struct data_reference *dra,
3584 struct data_reference *drb,
3585 struct loop *loop_nest)
3586 {
3587 unsigned int i;
3588 tree last_conflicts;
3589 struct subscript *subscript;
3590 tree res = NULL_TREE;
3591
3592 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
3593 {
3594 conflict_function *overlaps_a, *overlaps_b;
3595
3596 analyze_overlapping_iterations (DR_ACCESS_FN (dra, i),
3597 DR_ACCESS_FN (drb, i),
3598 &overlaps_a, &overlaps_b,
3599 &last_conflicts, loop_nest);
3600
3601 if (SUB_CONFLICTS_IN_A (subscript))
3602 free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
3603 if (SUB_CONFLICTS_IN_B (subscript))
3604 free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
3605
3606 SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
3607 SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
3608 SUB_LAST_CONFLICT (subscript) = last_conflicts;
3609
3610 /* If there is any undetermined conflict function we have to
3611 give a conservative answer in case we cannot prove that
3612 no dependence exists when analyzing another subscript. */
3613 if (CF_NOT_KNOWN_P (overlaps_a)
3614 || CF_NOT_KNOWN_P (overlaps_b))
3615 {
3616 res = chrec_dont_know;
3617 continue;
3618 }
3619
3620 /* When there is a subscript with no dependence we can stop. */
3621 else if (CF_NO_DEPENDENCE_P (overlaps_a)
3622 || CF_NO_DEPENDENCE_P (overlaps_b))
3623 {
3624 res = chrec_known;
3625 break;
3626 }
3627 }
3628
3629 if (res == NULL_TREE)
3630 return true;
3631
3632 if (res == chrec_known)
3633 dependence_stats.num_dependence_independent++;
3634 else
3635 dependence_stats.num_dependence_undetermined++;
3636 finalize_ddr_dependent (ddr, res);
3637 return false;
3638 }
3639
3640 /* Computes the conflicting iterations in LOOP_NEST, and initializes DDR. */
3641
3642 static void
3643 subscript_dependence_tester (struct data_dependence_relation *ddr,
3644 struct loop *loop_nest)
3645 {
3646 if (subscript_dependence_tester_1 (ddr, DDR_A (ddr), DDR_B (ddr), loop_nest))
3647 dependence_stats.num_dependence_dependent++;
3648
3649 compute_subscript_distance (ddr);
3650 if (build_classic_dist_vector (ddr, loop_nest))
3651 build_classic_dir_vector (ddr);
3652 }
3653
3654 /* Returns true when all the access functions of A are affine or
3655 constant with respect to LOOP_NEST. */
3656
3657 static bool
3658 access_functions_are_affine_or_constant_p (const struct data_reference *a,
3659 const struct loop *loop_nest)
3660 {
3661 unsigned int i;
3662 vec<tree> fns = DR_ACCESS_FNS (a);
3663 tree t;
3664
3665 FOR_EACH_VEC_ELT (fns, i, t)
3666 if (!evolution_function_is_invariant_p (t, loop_nest->num)
3667 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
3668 return false;
3669
3670 return true;
3671 }
3672
3673 /* This computes the affine dependence relation between the two data
3674 references of DDR with respect to LOOP_NEST. CHREC_KNOWN is used for
3675 representing the independence between two accesses, while
3676 CHREC_DONT_KNOW is used for representing the unknown relation.
3677
3678 Note that it is possible to stop the computation of the dependence
3679 relation the first time we detect a CHREC_KNOWN element for a given
3680 subscript. */
3681
3682 void
3683 compute_affine_dependence (struct data_dependence_relation *ddr,
3684 struct loop *loop_nest)
3685 {
3686 struct data_reference *dra = DDR_A (ddr);
3687 struct data_reference *drb = DDR_B (ddr);
3688
3689 if (dump_file && (dump_flags & TDF_DETAILS))
3690 {
3691 fprintf (dump_file, "(compute_affine_dependence\n");
3692 fprintf (dump_file, " stmt_a: ");
3693 print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
3694 fprintf (dump_file, " stmt_b: ");
3695 print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
3696 }
3697
3698 /* Analyze only when the dependence relation is not yet known. */
3699 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
3700 {
3701 dependence_stats.num_dependence_tests++;
3702
3703 if (access_functions_are_affine_or_constant_p (dra, loop_nest)
3704 && access_functions_are_affine_or_constant_p (drb, loop_nest))
3705 subscript_dependence_tester (ddr, loop_nest);
3706
3707 /* As a last case, if the dependence cannot be determined, or if
3708 the dependence is considered too difficult to determine, answer
3709 "don't know". */
3710 else
3711 {
3712 dependence_stats.num_dependence_undetermined++;
3713
3714 if (dump_file && (dump_flags & TDF_DETAILS))
3715 {
3716 fprintf (dump_file, "Data ref a:\n");
3717 dump_data_reference (dump_file, dra);
3718 fprintf (dump_file, "Data ref b:\n");
3719 dump_data_reference (dump_file, drb);
3720 fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
3721 }
3722 finalize_ddr_dependent (ddr, chrec_dont_know);
3723 }
3724 }
3725
3726 if (dump_file && (dump_flags & TDF_DETAILS))
3727 {
3728 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
3729 fprintf (dump_file, ") -> no dependence\n");
3730 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
3731 fprintf (dump_file, ") -> dependence analysis failed\n");
3732 else
3733 fprintf (dump_file, ")\n");
3734 }
3735 }
3736
3737 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
3738 the data references in DATAREFS, in the LOOP_NEST. When
3739 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
3740 relations. Return true when successful, i.e. when the number of data
3741 references is small enough to be handled. */
3742
3743 bool
3744 compute_all_dependences (vec<data_reference_p> datarefs,
3745 vec<ddr_p> *dependence_relations,
3746 vec<loop_p> loop_nest,
3747 bool compute_self_and_rr)
3748 {
3749 struct data_dependence_relation *ddr;
3750 struct data_reference *a, *b;
3751 unsigned int i, j;
3752
3753 if ((int) datarefs.length ()
3754 > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
3755 {
3756 struct data_dependence_relation *ddr;
3757
3758 /* Insert a single relation into dependence_relations:
3759 chrec_dont_know. */
3760 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
3761 dependence_relations->safe_push (ddr);
3762 return false;
3763 }
3764
3765 FOR_EACH_VEC_ELT (datarefs, i, a)
3766 for (j = i + 1; datarefs.iterate (j, &b); j++)
3767 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
3768 {
3769 ddr = initialize_data_dependence_relation (a, b, loop_nest);
3770 dependence_relations->safe_push (ddr);
3771 if (loop_nest.exists ())
3772 compute_affine_dependence (ddr, loop_nest[0]);
3773 }
3774
3775 if (compute_self_and_rr)
3776 FOR_EACH_VEC_ELT (datarefs, i, a)
3777 {
3778 ddr = initialize_data_dependence_relation (a, a, loop_nest);
3779 dependence_relations->safe_push (ddr);
3780 if (loop_nest.exists ())
3781 compute_affine_dependence (ddr, loop_nest[0]);
3782 }
3783
3784 return true;
3785 }
3786
3787 /* Describes a location of a memory reference. */
3788
3789 struct data_ref_loc
3790 {
3791 /* The memory reference. */
3792 tree ref;
3793
3794 /* True if the memory reference is read. */
3795 bool is_read;
3796 };
3797
3798
3799 /* Stores the locations of memory references in STMT to REFERENCES. Returns
3800 true if STMT clobbers memory, false otherwise. */
3801
3802 static bool
3803 get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
3804 {
3805 bool clobbers_memory = false;
3806 data_ref_loc ref;
3807 tree op0, op1;
3808 enum gimple_code stmt_code = gimple_code (stmt);
3809
3810 /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
3811 As we cannot model data references for accesses that are not
3812 spelled out explicitly, give up if such accesses may occur. */
3813 if (stmt_code == GIMPLE_CALL
3814 && !(gimple_call_flags (stmt) & ECF_CONST))
3815 {
3816 /* Allow an IFN_GOMP_SIMD_LANE call only in the loop it belongs to. */
3817 if (gimple_call_internal_p (stmt))
3818 switch (gimple_call_internal_fn (stmt))
3819 {
3820 case IFN_GOMP_SIMD_LANE:
3821 {
3822 struct loop *loop = gimple_bb (stmt)->loop_father;
3823 tree uid = gimple_call_arg (stmt, 0);
3824 gcc_assert (TREE_CODE (uid) == SSA_NAME);
3825 if (loop == NULL
3826 || loop->simduid != SSA_NAME_VAR (uid))
3827 clobbers_memory = true;
3828 break;
3829 }
3830 case IFN_MASK_LOAD:
3831 case IFN_MASK_STORE:
3832 break;
3833 default:
3834 clobbers_memory = true;
3835 break;
3836 }
3837 else
3838 clobbers_memory = true;
3839 }
3840 else if (stmt_code == GIMPLE_ASM
3841 && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
3842 || gimple_vuse (stmt)))
3843 clobbers_memory = true;
3844
3845 if (!gimple_vuse (stmt))
3846 return clobbers_memory;
3847
3848 if (stmt_code == GIMPLE_ASSIGN)
3849 {
3850 tree base;
3851 op0 = gimple_assign_lhs (stmt);
3852 op1 = gimple_assign_rhs1 (stmt);
3853
3854 if (DECL_P (op1)
3855 || (REFERENCE_CLASS_P (op1)
3856 && (base = get_base_address (op1))
3857 && TREE_CODE (base) != SSA_NAME))
3858 {
3859 ref.ref = op1;
3860 ref.is_read = true;
3861 references->safe_push (ref);
3862 }
3863 }
3864 else if (stmt_code == GIMPLE_CALL)
3865 {
3866 unsigned i, n;
3867
3868 ref.is_read = false;
3869 if (gimple_call_internal_p (stmt))
3870 switch (gimple_call_internal_fn (stmt))
3871 {
3872 case IFN_MASK_LOAD:
3873 if (gimple_call_lhs (stmt) == NULL_TREE)
3874 break;
3875 ref.is_read = true;
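/* Fall through: the memory reference itself is recorded by the
   IFN_MASK_STORE code below.  */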
3876 case IFN_MASK_STORE:
3877 ref.ref = fold_build2 (MEM_REF,
3878 ref.is_read
3879 ? TREE_TYPE (gimple_call_lhs (stmt))
3880 : TREE_TYPE (gimple_call_arg (stmt, 3)),
3881 gimple_call_arg (stmt, 0),
3882 gimple_call_arg (stmt, 1));
3883 references->safe_push (ref);
3884 return false;
3885 default:
3886 break;
3887 }
3888
3889 op0 = gimple_call_lhs (stmt);
3890 n = gimple_call_num_args (stmt);
3891 for (i = 0; i < n; i++)
3892 {
3893 op1 = gimple_call_arg (stmt, i);
3894
3895 if (DECL_P (op1)
3896 || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
3897 {
3898 ref.ref = op1;
3899 ref.is_read = true;
3900 references->safe_push (ref);
3901 }
3902 }
3903 }
3904 else
3905 return clobbers_memory;
3906
3907 if (op0
3908 && (DECL_P (op0)
3909 || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
3910 {
3911 ref.ref = op0;
3912 ref.is_read = false;
3913 references->safe_push (ref);
3914 }
3915 return clobbers_memory;
3916 }
3917
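/* Usage sketch (illustration only): note that get_references_in_stmt uses
   the opposite return convention of the exported functions below; a true
   return means STMT clobbers memory in a way that cannot be represented:

       auto_vec<data_ref_loc, 2> references;
       if (get_references_in_stmt (stmt, &references))
         ...   give up, STMT clobbers memory
       ...     otherwise each element provides ref and is_read  */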
3918
3919 /* Returns true if the loop-nest has any data reference. */
3920
3921 bool
3922 loop_nest_has_data_refs (loop_p loop)
3923 {
3924 basic_block *bbs = get_loop_body (loop);
3925 vec<data_ref_loc> references;
3926 references.create (3);
3927
3928 for (unsigned i = 0; i < loop->num_nodes; i++)
3929 {
3930 basic_block bb = bbs[i];
3931 gimple_stmt_iterator bsi;
3932
3933 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3934 {
3935 gimple *stmt = gsi_stmt (bsi);
3936 get_references_in_stmt (stmt, &references);
3937 if (references.length ())
3938 {
3939 free (bbs);
3940 references.release ();
3941 return true;
3942 }
3943 }
3944 }
3945 free (bbs);
3946 references.release ();
3947
3948 if (loop->inner)
3949 {
3950 loop = loop->inner;
3951 while (loop)
3952 {
3953 if (loop_nest_has_data_refs (loop))
3954 return true;
3955 loop = loop->next;
3956 }
3957 }
3958 return false;
3959 }
3960
3961 /* Stores the data references in STMT to DATAREFS. If there is an unanalyzable
3962 reference, returns false, otherwise returns true. NEST is the outermost
3963 loop of the loop nest in which the references should be analyzed. */
3964
3965 bool
3966 find_data_references_in_stmt (struct loop *nest, gimple *stmt,
3967 vec<data_reference_p> *datarefs)
3968 {
3969 unsigned i;
3970 auto_vec<data_ref_loc, 2> references;
3971 data_ref_loc *ref;
3972 bool ret = true;
3973 data_reference_p dr;
3974
3975 if (get_references_in_stmt (stmt, &references))
3976 return false;
3977
3978 FOR_EACH_VEC_ELT (references, i, ref)
3979 {
3980 dr = create_data_ref (nest, loop_containing_stmt (stmt),
3981 ref->ref, stmt, ref->is_read);
3982 gcc_assert (dr != NULL);
3983 datarefs->safe_push (dr);
3984 }
3985 references.release ();
3986 return ret;
3987 }
3988
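/* Usage sketch (illustration only): collecting the data references of a
   single statement relative to the loop nest rooted at NEST:

       vec<data_reference_p> datarefs = vNULL;
       if (!find_data_references_in_stmt (nest, stmt, &datarefs))
         ...   an unanalyzable reference was found
       ...     otherwise consume datarefs
       free_data_refs (datarefs);  */
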
3989 /* Stores the data references in STMT to DATAREFS. If there is an
3990 unanalyzable reference, returns false, otherwise returns true.
3991 NEST is the outermost loop of the loop nest in which the references
3992 should be instantiated, LOOP is the loop in which the references
3993 should be analyzed. */
3994
3995 bool
3996 graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple *stmt,
3997 vec<data_reference_p> *datarefs)
3998 {
3999 unsigned i;
4000 auto_vec<data_ref_loc, 2> references;
4001 data_ref_loc *ref;
4002 bool ret = true;
4003 data_reference_p dr;
4004
4005 if (get_references_in_stmt (stmt, &references))
4006 return false;
4007
4008 FOR_EACH_VEC_ELT (references, i, ref)
4009 {
4010 dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read);
4011 gcc_assert (dr != NULL);
4012 datarefs->safe_push (dr);
4013 }
4014
4015 references.release ();
4016 return ret;
4017 }
4018
4019 /* Search the data references in the basic block BB of LOOP, and record
4020 the information into DATAREFS. Returns chrec_dont_know when failing
4021 to analyze a difficult case, returns NULL_TREE otherwise. */
4022
4023 tree
4024 find_data_references_in_bb (struct loop *loop, basic_block bb,
4025 vec<data_reference_p> *datarefs)
4026 {
4027 gimple_stmt_iterator bsi;
4028
4029 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
4030 {
4031 gimple *stmt = gsi_stmt (bsi);
4032
4033 if (!find_data_references_in_stmt (loop, stmt, datarefs))
4034 {
4035 struct data_reference *res;
4036 res = XCNEW (struct data_reference);
4037 datarefs->safe_push (res);
4038
4039 return chrec_dont_know;
4040 }
4041 }
4042
4043 return NULL_TREE;
4044 }
4045
4046 /* Search the data references in LOOP, and record the information into
4047 DATAREFS. Returns chrec_dont_know when failing to analyze a
4048 difficult case, returns NULL_TREE otherwise.
4049
4050 TODO: This function should be made smarter so that it can handle address
4051 arithmetic expressions as if they were array accesses, etc. */
4052
4053 tree
4054 find_data_references_in_loop (struct loop *loop,
4055 vec<data_reference_p> *datarefs)
4056 {
4057 basic_block bb, *bbs;
4058 unsigned int i;
4059
4060 bbs = get_loop_body_in_dom_order (loop);
4061
4062 for (i = 0; i < loop->num_nodes; i++)
4063 {
4064 bb = bbs[i];
4065
4066 if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
4067 {
4068 free (bbs);
4069 return chrec_dont_know;
4070 }
4071 }
4072 free (bbs);
4073
4074 return NULL_TREE;
4075 }
4076
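/* Usage sketch (illustration only): collecting every data reference of
   the loop nest rooted at LOOP:

       vec<data_reference_p> datarefs = vNULL;
       if (find_data_references_in_loop (loop, &datarefs) == chrec_dont_know)
         ...   at least one reference could not be analyzed
       ...
       free_data_refs (datarefs);

   Even on failure the vector may contain data references, including the
   zeroed placeholder pushed by find_data_references_in_bb, so callers
   typically still release it with free_data_refs.  */
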
4077 /* Recursive helper function for find_loop_nest. */
4078
4079 static bool
4080 find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
4081 {
4082 /* Inner loops of the nest should not contain siblings. Example:
4083 when there are two consecutive loops,
4084
4085 | loop_0
4086 | loop_1
4087 | A[{0, +, 1}_1]
4088 | endloop_1
4089 | loop_2
4090 | A[{0, +, 1}_2]
4091 | endloop_2
4092 | endloop_0
4093
4094 the dependence relation cannot be captured by the distance
4095 abstraction. */
4096 if (loop->next)
4097 return false;
4098
4099 loop_nest->safe_push (loop);
4100 if (loop->inner)
4101 return find_loop_nest_1 (loop->inner, loop_nest);
4102 return true;
4103 }
4104
4105 /* Return false when the LOOP is not well nested. Otherwise return
4106 true and insert in LOOP_NEST the loops of the nest. LOOP_NEST will
4107 contain the loops from the outermost to the innermost, as they will
4108 appear in the classic distance vector. */
4109
4110 bool
4111 find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
4112 {
4113 loop_nest->safe_push (loop);
4114 if (loop->inner)
4115 return find_loop_nest_1 (loop->inner, loop_nest);
4116 return true;
4117 }
4118
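/* Usage sketch (illustration only): for a perfectly nested pair of loops

       | loop_0
       |   loop_1
       |     A[{0, +, 1}_1]
       |   endloop_1
       | endloop_0

   find_loop_nest records { loop_0, loop_1 }, outermost first:

       vec<loop_p> loop_nest = vNULL;
       if (!find_loop_nest (loop_0, &loop_nest))
         ...   the nest is not well formed (an inner level has siblings)  */
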
4119 /* Returns true when the data dependences have been computed, false otherwise.
4120 Given a loop nest LOOP, the following vectors are returned:
4121 DATAREFS is initialized to all the data references contained in this loop,
4122 DEPENDENCE_RELATIONS contains the relations between the data references.
4123 Compute read-read and self relations if
4124 COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
4125
4126 bool
4127 compute_data_dependences_for_loop (struct loop *loop,
4128 bool compute_self_and_read_read_dependences,
4129 vec<loop_p> *loop_nest,
4130 vec<data_reference_p> *datarefs,
4131 vec<ddr_p> *dependence_relations)
4132 {
4133 bool res = true;
4134
4135 memset (&dependence_stats, 0, sizeof (dependence_stats));
4136
4137 /* If the loop nest is not well formed, or one of the data references
4138 is not computable, give up without spending time to compute other
4139 dependences. */
4140 if (!loop
4141 || !find_loop_nest (loop, loop_nest)
4142 || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
4143 || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
4144 compute_self_and_read_read_dependences))
4145 res = false;
4146
4147 if (dump_file && (dump_flags & TDF_STATS))
4148 {
4149 fprintf (dump_file, "Dependence tester statistics:\n");
4150
4151 fprintf (dump_file, "Number of dependence tests: %d\n",
4152 dependence_stats.num_dependence_tests);
4153 fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
4154 dependence_stats.num_dependence_dependent);
4155 fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
4156 dependence_stats.num_dependence_independent);
4157 fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
4158 dependence_stats.num_dependence_undetermined);
4159
4160 fprintf (dump_file, "Number of subscript tests: %d\n",
4161 dependence_stats.num_subscript_tests);
4162 fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
4163 dependence_stats.num_subscript_undetermined);
4164 fprintf (dump_file, "Number of same subscript function: %d\n",
4165 dependence_stats.num_same_subscript_function);
4166
4167 fprintf (dump_file, "Number of ziv tests: %d\n",
4168 dependence_stats.num_ziv);
4169 fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
4170 dependence_stats.num_ziv_dependent);
4171 fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
4172 dependence_stats.num_ziv_independent);
4173 fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
4174 dependence_stats.num_ziv_unimplemented);
4175
4176 fprintf (dump_file, "Number of siv tests: %d\n",
4177 dependence_stats.num_siv);
4178 fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
4179 dependence_stats.num_siv_dependent);
4180 fprintf (dump_file, "Number of siv tests returning independent: %d\n",
4181 dependence_stats.num_siv_independent);
4182 fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
4183 dependence_stats.num_siv_unimplemented);
4184
4185 fprintf (dump_file, "Number of miv tests: %d\n",
4186 dependence_stats.num_miv);
4187 fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
4188 dependence_stats.num_miv_dependent);
4189 fprintf (dump_file, "Number of miv tests returning independent: %d\n",
4190 dependence_stats.num_miv_independent);
4191 fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
4192 dependence_stats.num_miv_unimplemented);
4193 }
4194
4195 return res;
4196 }
4197
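/* Usage sketch (illustration only): the all-in-one entry point above is
   typically driven as

       vec<loop_p> loop_nest = vNULL;
       vec<data_reference_p> datarefs = vNULL;
       vec<ddr_p> dependence_relations = vNULL;

       if (!compute_data_dependences_for_loop (loop, true, &loop_nest,
                                               &datarefs,
                                               &dependence_relations))
         ...   analysis failed; conservatively assume dependences

   See the teardown sketch after free_data_refs below for releasing the
   three vectors.  */
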
4198 /* Free the memory used by a data dependence relation DDR. */
4199
4200 void
4201 free_dependence_relation (struct data_dependence_relation *ddr)
4202 {
4203 if (ddr == NULL)
4204 return;
4205
4206 if (DDR_SUBSCRIPTS (ddr).exists ())
4207 free_subscripts (DDR_SUBSCRIPTS (ddr));
4208 DDR_DIST_VECTS (ddr).release ();
4209 DDR_DIR_VECTS (ddr).release ();
4210
4211 free (ddr);
4212 }
4213
4214 /* Free the memory used by the data dependence relations from
4215 DEPENDENCE_RELATIONS. */
4216
4217 void
4218 free_dependence_relations (vec<ddr_p> dependence_relations)
4219 {
4220 unsigned int i;
4221 struct data_dependence_relation *ddr;
4222
4223 FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
4224 if (ddr)
4225 free_dependence_relation (ddr);
4226
4227 dependence_relations.release ();
4228 }
4229
4230 /* Free the memory used by the data references from DATAREFS. */
4231
4232 void
4233 free_data_refs (vec<data_reference_p> datarefs)
4234 {
4235 unsigned int i;
4236 struct data_reference *dr;
4237
4238 FOR_EACH_VEC_ELT (datarefs, i, dr)
4239 free_data_ref (dr);
4240 datarefs.release ();
4241 }
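
/* Teardown sketch (illustration only): once the analysis results are no
   longer needed, the three vectors filled by
   compute_data_dependences_for_loop are released with

       free_dependence_relations (dependence_relations);
       free_data_refs (datarefs);
       loop_nest.release ();

   free_dependence_relation does not free the data references a DDR
   points to, which is why free_data_refs is a separate call.  */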