1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "varray.h"
36 #include "expr.h"
37 #include "tree-pass.h"
38 #include "ggc.h"
39 #include "insn-config.h"
40 #include "recog.h"
41 #include "hashtab.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "toplev.h"
45 #include "params.h"
46 #include "langhooks.h"
47 #include "tree-inline.h"
48 #include "tree-data-ref.h"
49 #include "optabs.h"
50
51 /* This pass inserts prefetch instructions to optimize cache usage during
52 accesses to arrays in loops. It processes loops sequentially and:
53
54 1) Gathers all memory references in the single loop.
55 2) For each of the references it decides when it is profitable to prefetch
56        it. To do this, we evaluate the reuse among the accesses, and determine
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example
61 (assuming cache line size is 64 bytes, char has size 1 byte and there
62 is no hardware sequential prefetch):
63
64 char *a;
65 for (i = 0; i < max; i++)
66 {
67 a[255] = ...; (0)
68 a[i] = ...; (1)
69 a[i + 64] = ...; (2)
70 a[16*i] = ...; (3)
71 a[187*i] = ...; (4)
72 a[187*i + 50] = ...; (5)
73 }
74
75 (0) obviously has PREFETCH_BEFORE 1
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
77 location 64 iterations before it, and PREFETCH_MOD 64 (since
78 it hits the same cache line otherwise).
79 (2) has PREFETCH_MOD 64
80 (3) has PREFETCH_MOD 4
81        (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
82            the cache line accessed by (4) matches the one accessed by (5) with
83            probability only 7/32 (14 of the 64 possible 64-byte alignments).
84 (5) has PREFETCH_MOD 1 as well.
85
86 Additionally, we use data dependence analysis to determine for each
87 reference the distance till the first reuse; this information is used
88 to determine the temporality of the issued prefetch instruction.
89
90 3) We determine how much ahead we need to prefetch. The number of
91 iterations needed is time to fetch / time spent in one iteration of
92 the loop. The problem is that we do not know either of these values,
93 so we just make a heuristic guess based on a magic (possibly)
94 target-specific constant and size of the loop.
95
96 4) Determine which of the references we prefetch. We take into account
97 that there is a maximum number of simultaneous prefetches (provided
98        by the machine description). We issue as many prefetches as possible
99 while still within this bound (starting with those with lowest
100 prefetch_mod, since they are responsible for most of the cache
101 misses).
102
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
105 prefetching nonaccessed memory.
106 TODO -- actually implement peeling.
107
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the
109 prefetch instructions with guards in cases where 5) was not sufficient
110 to satisfy the constraints?
111
112 Some other TODO:
113       -- write and use a more general reuse analysis (that could also be
114          used in other cache-aimed loop optimizations)
115       -- make it behave sanely together with the prefetches given by the
116          user (now we just ignore them; at the very least we should avoid
117          optimizing loops into which the user has put his own prefetches)
118       -- we assume that arrays are aligned to the cache line size; this
119          could be improved.  */
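
/* As a rough illustration of steps 3) - 6), the reference (1) in the example
   above might end up being transformed along the following lines (a sketch
   only, not actual pass output; it assumes 64-byte cache lines, an unroll
   factor of 64 derived from PREFETCH_MOD, and an assumed prefetch distance
   of 256 original iterations, i.e. four unrolled iterations):

     for (i = 0; i < max; i += 64)
       {
         __builtin_prefetch (&a[i + 256], 1, 3);
         a[i] = ...;
         a[i + 1] = ...;
         ...
         a[i + 63] = ...;
       }

   i.e. one prefetch instruction per cache line and unrolled loop body
   instead of one per original iteration.  */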
120
121 /* Magic constants follow. These should be replaced by machine specific
122 numbers. */
123
124 /* True if write can be prefetched by a read prefetch. */
125
126 #ifndef WRITE_CAN_USE_READ_PREFETCH
127 #define WRITE_CAN_USE_READ_PREFETCH 1
128 #endif
129
130 /* True if read can be prefetched by a write prefetch. */
131
132 #ifndef READ_CAN_USE_WRITE_PREFETCH
133 #define READ_CAN_USE_WRITE_PREFETCH 0
134 #endif
135
136 /* The size of the block loaded by a single prefetch. Usually, this is
137 the same as cache line size (at the moment, we only consider one level
138 of cache hierarchy). */
139
140 #ifndef PREFETCH_BLOCK
141 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
142 #endif
143
144 /* Do we have forward hardware sequential prefetching?  */
145
146 #ifndef HAVE_FORWARD_PREFETCH
147 #define HAVE_FORWARD_PREFETCH 0
148 #endif
149
150 /* Do we have backward hardware sequential prefetching?  */
151
152 #ifndef HAVE_BACKWARD_PREFETCH
153 #define HAVE_BACKWARD_PREFETCH 0
154 #endif
155
156 /* In some cases we are only able to determine that there is a certain
157 probability that the two accesses hit the same cache line. In this
158 case, we issue the prefetches for both of them if this probability
159    is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
160
161 #ifndef ACCEPTABLE_MISS_RATE
162 #define ACCEPTABLE_MISS_RATE 50
163 #endif
164
165 #ifndef HAVE_prefetch
166 #define HAVE_prefetch 0
167 #endif
168
169 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
170 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
171
172 /* We consider a memory access nontemporal if it is not reused sooner than
173 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
174 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
175    so that we use nontemporal prefetches e.g. if a single memory location
176 is accessed several times in a single iteration of the loop. */
177 #define NONTEMPORAL_FRACTION 16
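
/* A worked instance of the two thresholds above (the cache sizes are
   illustrative only; the real ones come from the target costs): with an
   L2 cache of 512 kB, a reference is considered nontemporal only if at
   least 512 kB of other data are accessed before its reuse, while with a
   32 kB L1 cache reuses closer than 32 kB / 16 = 2 kB are ignored when
   making that decision.  */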
178
179 /* In case we have to emit a memory fence instruction after the loop that
180 uses nontemporal stores, this defines the builtin to use. */
181
182 #ifndef FENCE_FOLLOWING_MOVNT
183 #define FENCE_FOLLOWING_MOVNT NULL_TREE
184 #endif
185
186 /* A group of references between which reuse may occur.  */
187
188 struct mem_ref_group
189 {
190 tree base; /* Base of the reference. */
191 HOST_WIDE_INT step; /* Step of the reference. */
192 struct mem_ref *refs; /* References in the group. */
193 struct mem_ref_group *next; /* Next group of references. */
194 };
195
196 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
197
198 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
199
200 /* The memory reference. */
201
202 struct mem_ref
203 {
204   tree stmt;			/* Statement in which the reference appears.  */
205 tree mem; /* The reference. */
206 HOST_WIDE_INT delta; /* Constant offset of the reference. */
207 struct mem_ref_group *group; /* The group of references it belongs to. */
208 unsigned HOST_WIDE_INT prefetch_mod;
209 /* Prefetch only each PREFETCH_MOD-th
210 iteration. */
211 unsigned HOST_WIDE_INT prefetch_before;
212 /* Prefetch only first PREFETCH_BEFORE
213 iterations. */
214 unsigned reuse_distance; /* The amount of data accessed before the first
215 reuse of this value. */
216 struct mem_ref *next; /* The next reference in the group. */
217 unsigned write_p : 1; /* Is it a write? */
218   unsigned independent_p : 1;	/* True if the reference is independent of
219 all other references inside the loop. */
220 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
221 unsigned storent_p : 1; /* True if we changed the store to a
222 nontemporal one. */
223 };
224
225 /* Dumps information about reference REF to FILE. */
226
227 static void
228 dump_mem_ref (FILE *file, struct mem_ref *ref)
229 {
230 fprintf (file, "Reference %p:\n", (void *) ref);
231
232 fprintf (file, " group %p (base ", (void *) ref->group);
233 print_generic_expr (file, ref->group->base, TDF_SLIM);
234 fprintf (file, ", step ");
235 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
236 fprintf (file, ")\n");
237
238 fprintf (file, " delta ");
239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
240 fprintf (file, "\n");
241
242 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
243
244 fprintf (file, "\n");
245 }
246
247 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
248 exist. */
249
250 static struct mem_ref_group *
251 find_or_create_group (struct mem_ref_group **groups, tree base,
252 HOST_WIDE_INT step)
253 {
254 struct mem_ref_group *group;
255
256 for (; *groups; groups = &(*groups)->next)
257 {
258 if ((*groups)->step == step
259 && operand_equal_p ((*groups)->base, base, 0))
260 return *groups;
261
262 /* Keep the list of groups sorted by decreasing step. */
263 if ((*groups)->step < step)
264 break;
265 }
266
267 group = XNEW (struct mem_ref_group);
268 group->base = base;
269 group->step = step;
270 group->refs = NULL;
271 group->next = *groups;
272 *groups = group;
273
274 return group;
275 }
276
277 /* Records a memory reference MEM in GROUP with offset DELTA and write status
278 WRITE_P. The reference occurs in statement STMT. */
279
280 static void
281 record_ref (struct mem_ref_group *group, tree stmt, tree mem,
282 HOST_WIDE_INT delta, bool write_p)
283 {
284 struct mem_ref **aref;
285
286 /* Do not record the same address twice. */
287 for (aref = &group->refs; *aref; aref = &(*aref)->next)
288 {
289       /* It need not be possible for a write reference to reuse a read
290 	 prefetch, or vice versa.  */
291 if (!WRITE_CAN_USE_READ_PREFETCH
292 && write_p
293 && !(*aref)->write_p)
294 continue;
295 if (!READ_CAN_USE_WRITE_PREFETCH
296 && !write_p
297 && (*aref)->write_p)
298 continue;
299
300 if ((*aref)->delta == delta)
301 return;
302 }
303
304 (*aref) = XNEW (struct mem_ref);
305 (*aref)->stmt = stmt;
306 (*aref)->mem = mem;
307 (*aref)->delta = delta;
308 (*aref)->write_p = write_p;
309 (*aref)->prefetch_before = PREFETCH_ALL;
310 (*aref)->prefetch_mod = 1;
311 (*aref)->reuse_distance = 0;
312 (*aref)->issue_prefetch_p = false;
313 (*aref)->group = group;
314 (*aref)->next = NULL;
315 (*aref)->independent_p = false;
316 (*aref)->storent_p = false;
317
318 if (dump_file && (dump_flags & TDF_DETAILS))
319 dump_mem_ref (dump_file, *aref);
320 }
321
322 /* Release memory references in GROUPS. */
323
324 static void
325 release_mem_refs (struct mem_ref_group *groups)
326 {
327 struct mem_ref_group *next_g;
328 struct mem_ref *ref, *next_r;
329
330 for (; groups; groups = next_g)
331 {
332 next_g = groups->next;
333 for (ref = groups->refs; ref; ref = next_r)
334 {
335 next_r = ref->next;
336 free (ref);
337 }
338 free (groups);
339 }
340 }
341
342 /* A structure used to pass arguments to idx_analyze_ref. */
343
344 struct ar_data
345 {
346 struct loop *loop; /* Loop of the reference. */
347 tree stmt; /* Statement of the reference. */
348 HOST_WIDE_INT *step; /* Step of the memory reference. */
349 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
350 };
351
352 /* Analyzes a single INDEX of a memory reference to obtain information
353    described in analyze_ref.  Callback for for_each_index.  */
354
355 static bool
356 idx_analyze_ref (tree base, tree *index, void *data)
357 {
358 struct ar_data *ar_data = (struct ar_data *) data;
359 tree ibase, step, stepsize;
360 HOST_WIDE_INT istep, idelta = 0, imult = 1;
361 affine_iv iv;
362
363 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
364 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
365 return false;
366
367 if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
368 return false;
369 ibase = iv.base;
370 step = iv.step;
371
372 if (!cst_and_fits_in_hwi (step))
373 return false;
374 istep = int_cst_value (step);
375
376 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
377 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
378 {
379 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
380 ibase = TREE_OPERAND (ibase, 0);
381 }
382 if (cst_and_fits_in_hwi (ibase))
383 {
384 idelta += int_cst_value (ibase);
385 ibase = build_int_cst (TREE_TYPE (ibase), 0);
386 }
387
388 if (TREE_CODE (base) == ARRAY_REF)
389 {
390 stepsize = array_ref_element_size (base);
391 if (!cst_and_fits_in_hwi (stepsize))
392 return false;
393 imult = int_cst_value (stepsize);
394
395 istep *= imult;
396 idelta *= imult;
397 }
398
399 *ar_data->step += istep;
400 *ar_data->delta += idelta;
401 *index = ibase;
402
403 return true;
404 }
405
406 /* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
407    and STEP are integer constants and iter is the number of iterations of LOOP.
408    The reference occurs in statement STMT.  Strips nonaddressable component
409    references from REF_P.  */
410
411 static bool
412 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
413 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
414 tree stmt)
415 {
416 struct ar_data ar_data;
417 tree off;
418 HOST_WIDE_INT bit_offset;
419 tree ref = *ref_p;
420
421 *step = 0;
422 *delta = 0;
423
424 /* First strip off the component references. Ignore bitfields. */
425 if (TREE_CODE (ref) == COMPONENT_REF
426 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
427 ref = TREE_OPERAND (ref, 0);
428
429 *ref_p = ref;
430
431 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
432 {
433 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
434 bit_offset = TREE_INT_CST_LOW (off);
435 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
436
437 *delta += bit_offset / BITS_PER_UNIT;
438 }
439
440 *base = unshare_expr (ref);
441 ar_data.loop = loop;
442 ar_data.stmt = stmt;
443 ar_data.step = step;
444 ar_data.delta = delta;
445 return for_each_index (base, idx_analyze_ref, &ar_data);
446 }
447
448 /* Record a memory reference REF to the list REFS. The reference occurs in
449 LOOP in statement STMT and it is write if WRITE_P. Returns true if the
450 reference was recorded, false otherwise. */
451
452 static bool
453 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
454 tree ref, bool write_p, tree stmt)
455 {
456 tree base;
457 HOST_WIDE_INT step, delta;
458 struct mem_ref_group *agrp;
459
460 if (get_base_address (ref) == NULL)
461 return false;
462
463 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
464 return false;
465
466 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
467 are integer constants. */
468 agrp = find_or_create_group (refs, base, step);
469 record_ref (agrp, stmt, ref, delta, write_p);
470
471 return true;
472 }
473
474 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
475 true if there are no other memory references inside the loop. */
476
477 static struct mem_ref_group *
478 gather_memory_references (struct loop *loop, bool *no_other_refs)
479 {
480 basic_block *body = get_loop_body_in_dom_order (loop);
481 basic_block bb;
482 unsigned i;
483 block_stmt_iterator bsi;
484 tree stmt, lhs, rhs, call;
485 struct mem_ref_group *refs = NULL;
486
487 *no_other_refs = true;
488
489   /* Scan the loop body in order, so that earlier references precede the
490 later ones. */
491 for (i = 0; i < loop->num_nodes; i++)
492 {
493 bb = body[i];
494 if (bb->loop_father != loop)
495 continue;
496
497 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
498 {
499 stmt = bsi_stmt (bsi);
500 call = get_call_expr_in (stmt);
501 if (call && !(call_expr_flags (call) & ECF_CONST))
502 *no_other_refs = false;
503
504 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
505 {
506 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
507 *no_other_refs = false;
508 continue;
509 }
510
511 lhs = GIMPLE_STMT_OPERAND (stmt, 0);
512 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
513
514 if (REFERENCE_CLASS_P (rhs))
515 *no_other_refs &= gather_memory_references_ref (loop, &refs,
516 rhs, false, stmt);
517 if (REFERENCE_CLASS_P (lhs))
518 *no_other_refs &= gather_memory_references_ref (loop, &refs,
519 lhs, true, stmt);
520 }
521 }
522 free (body);
523
524 return refs;
525 }
526
527 /* Prune the prefetch candidate REF using self-reuse.  */
528
529 static void
530 prune_ref_by_self_reuse (struct mem_ref *ref)
531 {
532 HOST_WIDE_INT step = ref->group->step;
533 bool backward = step < 0;
534
535 if (step == 0)
536 {
537 /* Prefetch references to invariant address just once. */
538 ref->prefetch_before = 1;
539 return;
540 }
541
542 if (backward)
543 step = -step;
544
545 if (step > PREFETCH_BLOCK)
546 return;
547
548 if ((backward && HAVE_BACKWARD_PREFETCH)
549 || (!backward && HAVE_FORWARD_PREFETCH))
550 {
551 ref->prefetch_before = 1;
552 return;
553 }
554
555 ref->prefetch_mod = PREFETCH_BLOCK / step;
556 }
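
/* A small worked example for the function above (a sketch, assuming
   PREFETCH_BLOCK is 64 and there is no hardware sequential prefetching):
   a reference with step 4 gets PREFETCH_MOD = 64 / 4 = 16, so one prefetch
   per 16 iterations covers each cache line exactly once; a reference with
   step 100 (larger than the block) keeps PREFETCH_MOD 1; and a reference
   with step 0 is prefetched just once (PREFETCH_BEFORE = 1).  */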
557
558 /* Divides X by BY, rounding down. */
559
560 static HOST_WIDE_INT
561 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
562 {
563 gcc_assert (by > 0);
564
565 if (x >= 0)
566 return x / by;
567 else
568 return (x + by - 1) / by;
569 }
570
571 /* Prune the prefetch candidate REF using the reuse with BY.
572 If BY_IS_BEFORE is true, BY is before REF in the loop. */
573
574 static void
575 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
576 bool by_is_before)
577 {
578 HOST_WIDE_INT step = ref->group->step;
579 bool backward = step < 0;
580 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
581 HOST_WIDE_INT delta = delta_b - delta_r;
582 HOST_WIDE_INT hit_from;
583 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
584
585 if (delta == 0)
586 {
587       /* If the references have the same address, only prefetch the
588 former. */
589 if (by_is_before)
590 ref->prefetch_before = 0;
591
592 return;
593 }
594
595 if (!step)
596 {
597 /* If the reference addresses are invariant and fall into the
598 same cache line, prefetch just the first one. */
599 if (!by_is_before)
600 return;
601
602 if (ddown (ref->delta, PREFETCH_BLOCK)
603 != ddown (by->delta, PREFETCH_BLOCK))
604 return;
605
606 ref->prefetch_before = 0;
607 return;
608 }
609
610 /* Only prune the reference that is behind in the array. */
611 if (backward)
612 {
613 if (delta > 0)
614 return;
615
616 /* Transform the data so that we may assume that the accesses
617 are forward. */
618 delta = - delta;
619 step = -step;
620 delta_r = PREFETCH_BLOCK - 1 - delta_r;
621 delta_b = PREFETCH_BLOCK - 1 - delta_b;
622 }
623 else
624 {
625 if (delta < 0)
626 return;
627 }
628
629   /* Check whether the two references are likely to hit the same cache
630      line, and how far apart the iterations in which this happens are
631      from each other.  */
632
633 if (step <= PREFETCH_BLOCK)
634 {
635 /* The accesses are sure to meet. Let us check when. */
636 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
637 prefetch_before = (hit_from - delta_r + step - 1) / step;
638
639 if (prefetch_before < ref->prefetch_before)
640 ref->prefetch_before = prefetch_before;
641
642 return;
643 }
644
645   /* A more complicated case.  First let us ensure that the cache line size
646      and the step are coprime (here we assume that PREFETCH_BLOCK is a
647      power of two).  */
648 prefetch_block = PREFETCH_BLOCK;
649 while ((step & 1) == 0
650 && prefetch_block > 1)
651 {
652 step >>= 1;
653 prefetch_block >>= 1;
654 delta >>= 1;
655 }
656
657 /* Now step > prefetch_block, and step and prefetch_block are coprime.
658 Determine the probability that the accesses hit the same cache line. */
659
660 prefetch_before = delta / step;
661 delta %= step;
662 if ((unsigned HOST_WIDE_INT) delta
663 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
664 {
665 if (prefetch_before < ref->prefetch_before)
666 ref->prefetch_before = prefetch_before;
667
668 return;
669 }
670
671 /* Try also the following iteration. */
672 prefetch_before++;
673 delta = step - delta;
674 if ((unsigned HOST_WIDE_INT) delta
675 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
676 {
677 if (prefetch_before < ref->prefetch_before)
678 ref->prefetch_before = prefetch_before;
679
680 return;
681 }
682
683   /* REF probably does not reuse BY.  */
684 return;
685 }
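
/* A worked instance of the "sure to meet" branch above, matching the
   example in the header comment (PREFETCH_BLOCK = 64): for REF = (1) a[i]
   and BY = (2) a[i + 64] we have step = 1 and delta = 64, so
   hit_from = ddown (64, 64) * 64 = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, which is exactly the
   PREFETCH_BEFORE 64 quoted for (1) in the header.  */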
686
687 /* Prune the prefetch candidate REF using the reuses with other references
688 in REFS. */
689
690 static void
691 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
692 {
693 struct mem_ref *prune_by;
694 bool before = true;
695
696 prune_ref_by_self_reuse (ref);
697
698 for (prune_by = refs; prune_by; prune_by = prune_by->next)
699 {
700 if (prune_by == ref)
701 {
702 before = false;
703 continue;
704 }
705
706 if (!WRITE_CAN_USE_READ_PREFETCH
707 && ref->write_p
708 && !prune_by->write_p)
709 continue;
710 if (!READ_CAN_USE_WRITE_PREFETCH
711 && !ref->write_p
712 && prune_by->write_p)
713 continue;
714
715 prune_ref_by_group_reuse (ref, prune_by, before);
716 }
717 }
718
719 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
720
721 static void
722 prune_group_by_reuse (struct mem_ref_group *group)
723 {
724 struct mem_ref *ref_pruned;
725
726 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
727 {
728 prune_ref_by_reuse (ref_pruned, group->refs);
729
730 if (dump_file && (dump_flags & TDF_DETAILS))
731 {
732 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
733
734 if (ref_pruned->prefetch_before == PREFETCH_ALL
735 && ref_pruned->prefetch_mod == 1)
736 fprintf (dump_file, " no restrictions");
737 else if (ref_pruned->prefetch_before == 0)
738 fprintf (dump_file, " do not prefetch");
739 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
740 fprintf (dump_file, " prefetch once");
741 else
742 {
743 if (ref_pruned->prefetch_before != PREFETCH_ALL)
744 {
745 fprintf (dump_file, " prefetch before ");
746 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
747 ref_pruned->prefetch_before);
748 }
749 if (ref_pruned->prefetch_mod != 1)
750 {
751 fprintf (dump_file, " prefetch mod ");
752 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
753 ref_pruned->prefetch_mod);
754 }
755 }
756 fprintf (dump_file, "\n");
757 }
758 }
759 }
760
761 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
762
763 static void
764 prune_by_reuse (struct mem_ref_group *groups)
765 {
766 for (; groups; groups = groups->next)
767 prune_group_by_reuse (groups);
768 }
769
770 /* Returns true if we should issue a prefetch for REF.  */
771
772 static bool
773 should_issue_prefetch_p (struct mem_ref *ref)
774 {
775   /* For now, do not issue prefetches for references that should only be
776      prefetched in the first few iterations (PREFETCH_BEFORE != PREFETCH_ALL).  */
777 if (ref->prefetch_before != PREFETCH_ALL)
778 return false;
779
780 /* Do not prefetch nontemporal stores. */
781 if (ref->storent_p)
782 return false;
783
784 return true;
785 }
786
787 /* Decide which of the prefetch candidates in GROUPS to prefetch.
788 AHEAD is the number of iterations to prefetch ahead (which corresponds
789 to the number of simultaneous instances of one prefetch running at a
790    time).  UNROLL_FACTOR is the factor by which the loop is going to be
791 unrolled. Returns true if there is anything to prefetch. */
792
793 static bool
794 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
795 unsigned ahead)
796 {
797 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
798 unsigned slots_per_prefetch;
799 struct mem_ref *ref;
800 bool any = false;
801
802 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
803 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
804
805 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
806 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
807 it will need a prefetch slot. */
808 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
809 if (dump_file && (dump_flags & TDF_DETAILS))
810 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
811 slots_per_prefetch);
812
813 /* For now we just take memory references one by one and issue
814 prefetches for as many as possible. The groups are sorted
815 starting with the largest step, since the references with
816 large step are more likely to cause many cache misses. */
817
818 for (; groups; groups = groups->next)
819 for (ref = groups->refs; ref; ref = ref->next)
820 {
821 if (!should_issue_prefetch_p (ref))
822 continue;
823
824 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
825 and we unroll the loop UNROLL_FACTOR times, we need to insert
826 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
827 iteration. */
828 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
829 / ref->prefetch_mod);
830 prefetch_slots = n_prefetches * slots_per_prefetch;
831
832 /* If more than half of the prefetches would be lost anyway, do not
833 issue the prefetch. */
834 if (2 * remaining_prefetch_slots < prefetch_slots)
835 continue;
836
837 ref->issue_prefetch_p = true;
838
839 if (remaining_prefetch_slots <= prefetch_slots)
840 return true;
841 remaining_prefetch_slots -= prefetch_slots;
842 any = true;
843 }
844
845 return any;
846 }
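
/* A worked instance of the bookkeeping above (illustrative numbers;
   SIMULTANEOUS_PREFETCHES comes from the target): with ahead = 8 and
   unroll_factor = 4, slots_per_prefetch = (8 + 2) / 4 = 2.  A reference
   with prefetch_mod = 2 then needs n_prefetches = ceil (4 / 2) = 2
   prefetch instructions per unrolled body, i.e. 4 slots, and is scheduled
   only while at least half of that fits into the remaining slots.  */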
847
848 /* Determine whether there is any reference suitable for prefetching
849 in GROUPS. */
850
851 static bool
852 anything_to_prefetch_p (struct mem_ref_group *groups)
853 {
854 struct mem_ref *ref;
855
856 for (; groups; groups = groups->next)
857 for (ref = groups->refs; ref; ref = ref->next)
858 if (should_issue_prefetch_p (ref))
859 return true;
860
861 return false;
862 }
863
864 /* Issue prefetches for the reference REF into the loop as decided before.
865    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
866    is the factor by which LOOP was unrolled.  */
867
868 static void
869 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
870 {
871 HOST_WIDE_INT delta;
872 tree addr, addr_base, prefetch, write_p, local;
873 block_stmt_iterator bsi;
874 unsigned n_prefetches, ap;
875 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
876
877 if (dump_file && (dump_flags & TDF_DETAILS))
878 fprintf (dump_file, "Issued%s prefetch for %p.\n",
879 nontemporal ? " nontemporal" : "",
880 (void *) ref);
881
882 bsi = bsi_for_stmt (ref->stmt);
883
884 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
885 / ref->prefetch_mod);
886 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
887 addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base),
888 true, NULL, true, BSI_SAME_STMT);
889 write_p = ref->write_p ? integer_one_node : integer_zero_node;
890 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
891
892 for (ap = 0; ap < n_prefetches; ap++)
893 {
894 /* Determine the address to prefetch. */
895 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
896 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
897 addr_base, size_int (delta));
898 addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL,
899 true, BSI_SAME_STMT);
900
901 /* Create the prefetch instruction. */
902 prefetch = build_call_expr (built_in_decls[BUILT_IN_PREFETCH],
903 3, addr, write_p, local);
904 bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
905 }
906 }
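
/* For instance (a sketch with made-up numbers): for a read with group
   step 16, prefetch_mod 4, unroll_factor 8 and ahead 10, the loop above
   emits n_prefetches = ceil (8 / 4) = 2 calls per unrolled body, namely
   __builtin_prefetch (addr_base + (10 + 0 * 4) * 16, 0, 3) and
   __builtin_prefetch (addr_base + (10 + 1 * 4) * 16, 0, 3), i.e. 160 and
   224 bytes ahead of the reference.  */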
907
908 /* Issue prefetches for the references in GROUPS into the loop as decided
909    before.  AHEAD is the number of iterations to prefetch ahead.
910    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
911
912 static void
913 issue_prefetches (struct mem_ref_group *groups,
914 unsigned unroll_factor, unsigned ahead)
915 {
916 struct mem_ref *ref;
917
918 for (; groups; groups = groups->next)
919 for (ref = groups->refs; ref; ref = ref->next)
920 if (ref->issue_prefetch_p)
921 issue_prefetch_ref (ref, unroll_factor, ahead);
922 }
923
924 /* Returns true if REF is a memory write for which a nontemporal store insn
925 can be used. */
926
927 static bool
928 nontemporal_store_p (struct mem_ref *ref)
929 {
930 enum machine_mode mode;
931 enum insn_code code;
932
933 /* REF must be a write that is not reused. We require it to be independent
934      of all other memory references in the loop, as the nontemporal stores may
935 be reordered with respect to other memory references. */
936 if (!ref->write_p
937 || !ref->independent_p
938 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
939 return false;
940
941 /* Check that we have the storent instruction for the mode. */
942 mode = TYPE_MODE (TREE_TYPE (ref->mem));
943 if (mode == BLKmode)
944 return false;
945
946 code = optab_handler (storent_optab, mode)->insn_code;
947 return code != CODE_FOR_nothing;
948 }
949
950 /* If REF is a nontemporal store, we mark the corresponding modify statement
951 and return true. Otherwise, we return false. */
952
953 static bool
954 mark_nontemporal_store (struct mem_ref *ref)
955 {
956 if (!nontemporal_store_p (ref))
957 return false;
958
959 if (dump_file && (dump_flags & TDF_DETAILS))
960 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
961 (void *) ref);
962
963 MOVE_NONTEMPORAL (ref->stmt) = true;
964 ref->storent_p = true;
965
966 return true;
967 }
968
969 /* Issue a memory fence instruction after LOOP. */
970
971 static void
972 emit_mfence_after_loop (struct loop *loop)
973 {
974 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
975 edge exit;
976 tree call;
977 block_stmt_iterator bsi;
978 unsigned i;
979
980 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
981 {
982 call = build_function_call_expr (FENCE_FOLLOWING_MOVNT, NULL_TREE);
983
984 if (!single_pred_p (exit->dest)
985 /* If possible, we prefer not to insert the fence on other paths
986 in cfg. */
987 && !(exit->flags & EDGE_ABNORMAL))
988 split_loop_exit_edge (exit);
989 bsi = bsi_after_labels (exit->dest);
990
991 bsi_insert_before (&bsi, call, BSI_NEW_STMT);
992 mark_virtual_ops_for_renaming (call);
993 }
994
995 VEC_free (edge, heap, exits);
996 update_ssa (TODO_update_ssa_only_virtuals);
997 }
998
999 /* Returns true if we can use storent in loop, false otherwise. */
1000
1001 static bool
1002 may_use_storent_in_loop_p (struct loop *loop)
1003 {
1004 bool ret = true;
1005
1006 if (loop->inner != NULL)
1007 return false;
1008
1009   /* If we must issue an mfence insn after using storent, check that there
1010 is a suitable place for it at each of the loop exits. */
1011 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1012 {
1013 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1014 unsigned i;
1015 edge exit;
1016
1017 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1018 if ((exit->flags & EDGE_ABNORMAL)
1019 && exit->dest == EXIT_BLOCK_PTR)
1020 ret = false;
1021
1022 VEC_free (edge, heap, exits);
1023 }
1024
1025 return ret;
1026 }
1027
1028 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1029 references in the loop. */
1030
1031 static void
1032 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1033 {
1034 struct mem_ref *ref;
1035 bool any = false;
1036
1037 if (!may_use_storent_in_loop_p (loop))
1038 return;
1039
1040 for (; groups; groups = groups->next)
1041 for (ref = groups->refs; ref; ref = ref->next)
1042 any |= mark_nontemporal_store (ref);
1043
1044 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1045 emit_mfence_after_loop (loop);
1046 }
1047
1048 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1049    this is the case, fills in DESC with the description of the number of
1050    iterations.  */
1051
1052 static bool
1053 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1054 unsigned factor)
1055 {
1056 if (!can_unroll_loop_p (loop, factor, desc))
1057 return false;
1058
1059 /* We only consider loops without control flow for unrolling. This is not
1060 a hard restriction -- tree_unroll_loop works with arbitrary loops
1061 as well; but the unrolling/prefetching is usually more profitable for
1062 loops consisting of a single basic block, and we want to limit the
1063 code growth. */
1064 if (loop->num_nodes > 2)
1065 return false;
1066
1067 return true;
1068 }
1069
1070 /* Determines the factor by which to unroll LOOP, from the information
1071    contained in the list of memory references REFS.  The description of the
1072    number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1073    insns in the LOOP.  EST_NITER is the estimated number of iterations of
1074    the loop, or -1 if no estimate is available.  */
1075
1076 static unsigned
1077 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1078 unsigned ninsns, struct tree_niter_desc *desc,
1079 HOST_WIDE_INT est_niter)
1080 {
1081 unsigned upper_bound;
1082 unsigned nfactor, factor, mod_constraint;
1083 struct mem_ref_group *agp;
1084 struct mem_ref *ref;
1085
1086 /* First check whether the loop is not too large to unroll. We ignore
1087 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1088 from unrolling them enough to make exactly one cache line covered by each
1089 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1090 us from unrolling the loops too many times in cases where we only expect
1091 gains from better scheduling and decreasing loop overhead, which is not
1092 the case here. */
1093 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1094
1095 /* If we unrolled the loop more times than it iterates, the unrolled version
1096      of the loop would never be entered.  */
1097 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1098 upper_bound = est_niter;
1099
1100 if (upper_bound <= 1)
1101 return 1;
1102
1103   /* Choose the factor so that we may prefetch each cache line just once,
1104 but bound the unrolling by UPPER_BOUND. */
1105 factor = 1;
1106 for (agp = refs; agp; agp = agp->next)
1107 for (ref = agp->refs; ref; ref = ref->next)
1108 if (should_issue_prefetch_p (ref))
1109 {
1110 mod_constraint = ref->prefetch_mod;
1111 nfactor = least_common_multiple (mod_constraint, factor);
1112 if (nfactor <= upper_bound)
1113 factor = nfactor;
1114 }
1115
1116 if (!should_unroll_loop_p (loop, desc, factor))
1117 return 1;
1118
1119 return factor;
1120 }
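
/* A small worked example for the loop above (illustrative numbers): with
   prefetched references whose PREFETCH_MODs are 4, 6 and 16, the candidate
   factors are lcm (1, 4) = 4, lcm (4, 6) = 12 and lcm (12, 16) = 48, each
   kept only while it does not exceed UPPER_BOUND; with upper_bound = 16
   the result is 12, provided should_unroll_loop_p agrees.  */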
1121
1122 /* Returns the total volume of the memory references REFS, taking into account
1123 reuses in the innermost loop and cache line size. TODO -- we should also
1124 take into account reuses across the iterations of the loops in the loop
1125 nest. */
1126
1127 static unsigned
1128 volume_of_references (struct mem_ref_group *refs)
1129 {
1130 unsigned volume = 0;
1131 struct mem_ref_group *gr;
1132 struct mem_ref *ref;
1133
1134 for (gr = refs; gr; gr = gr->next)
1135 for (ref = gr->refs; ref; ref = ref->next)
1136 {
1137 /* Almost always reuses another value? */
1138 if (ref->prefetch_before != PREFETCH_ALL)
1139 continue;
1140
1141 /* If several iterations access the same cache line, use the size of
1142 the line divided by this number. Otherwise, a cache line is
1143 accessed in each iteration. TODO -- in the latter case, we should
1144 	 take the size of the reference into account, rounding it up to a
1145 	 multiple of the cache line size.  */
1146 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1147 }
1148 return volume;
1149 }
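
/* E.g. (illustrative, with a 64-byte L1 line): a reference with
   prefetch_mod 1 contributes 64 bytes per iteration, one with
   prefetch_mod 16 contributes 64 / 16 = 4 bytes, and a reference that
   almost always reuses another value (prefetch_before != PREFETCH_ALL)
   contributes nothing, giving a total volume of 68 bytes.  */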
1150
1151 /* Returns the volume of memory references accessed across VEC iterations of
1152 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1153 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1154
1155 static unsigned
1156 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1157 {
1158 unsigned i;
1159
1160 for (i = 0; i < n; i++)
1161 if (vec[i] != 0)
1162 break;
1163
1164 if (i == n)
1165 return 0;
1166
1167 gcc_assert (vec[i] > 0);
1168
1169 /* We ignore the parts of the distance vector in subloops, since usually
1170 the numbers of iterations are much smaller. */
1171 return loop_sizes[i] * vec[i];
1172 }
1173
1174 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1175    at the position corresponding to the loop of the step.  N is the depth
1176    of the considered loop nest, and LOOP is its innermost loop.  */
1177
1178 static void
1179 add_subscript_strides (tree access_fn, unsigned stride,
1180 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1181 {
1182 struct loop *aloop;
1183 tree step;
1184 HOST_WIDE_INT astep;
1185 unsigned min_depth = loop_depth (loop) - n;
1186
1187 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1188 {
1189 aloop = get_chrec_loop (access_fn);
1190 step = CHREC_RIGHT (access_fn);
1191 access_fn = CHREC_LEFT (access_fn);
1192
1193 if ((unsigned) loop_depth (aloop) <= min_depth)
1194 continue;
1195
1196 if (host_integerp (step, 0))
1197 astep = tree_low_cst (step, 0);
1198 else
1199 astep = L1_CACHE_LINE_SIZE;
1200
1201 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1202
1203 }
1204 }
1205
1206 /* Returns the volume of memory references accessed between two consecutive
1207 self-reuses of the reference DR. We consider the subscripts of DR in N
1208 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1209 loops. LOOP is the innermost loop of the current loop nest. */
1210
1211 static unsigned
1212 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1213 struct loop *loop)
1214 {
1215 tree stride, access_fn;
1216 HOST_WIDE_INT *strides, astride;
1217 VEC (tree, heap) *access_fns;
1218 tree ref = DR_REF (dr);
1219 unsigned i, ret = ~0u;
1220
1221 /* In the following example:
1222
1223 for (i = 0; i < N; i++)
1224 for (j = 0; j < N; j++)
1225 use (a[j][i]);
1226      the same cache line is accessed every N steps (except when the change from
1227 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1228 we cannot rely purely on the results of the data dependence analysis.
1229
1230 Instead, we compute the stride of the reference in each loop, and consider
1231      the innermost loop in which the stride is less than the cache size.  */
1232
1233 strides = XCNEWVEC (HOST_WIDE_INT, n);
1234 access_fns = DR_ACCESS_FNS (dr);
1235
1236 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1237 {
1238 /* Keep track of the reference corresponding to the subscript, so that we
1239 know its stride. */
1240 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1241 ref = TREE_OPERAND (ref, 0);
1242
1243 if (TREE_CODE (ref) == ARRAY_REF)
1244 {
1245 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1246 if (host_integerp (stride, 1))
1247 astride = tree_low_cst (stride, 1);
1248 else
1249 astride = L1_CACHE_LINE_SIZE;
1250
1251 ref = TREE_OPERAND (ref, 0);
1252 }
1253 else
1254 astride = 1;
1255
1256 add_subscript_strides (access_fn, astride, strides, n, loop);
1257 }
1258
1259 for (i = n; i-- > 0; )
1260 {
1261 unsigned HOST_WIDE_INT s;
1262
1263 s = strides[i] < 0 ? -strides[i] : strides[i];
1264
1265 if (s < (unsigned) L1_CACHE_LINE_SIZE
1266 && (loop_sizes[i]
1267 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1268 {
1269 ret = loop_sizes[i];
1270 break;
1271 }
1272 }
1273
1274 free (strides);
1275 return ret;
1276 }
1277
1278 /* Determines the distance until the first reuse of each reference in REFS
1279 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1280 memory references in the loop. */
1281
1282 static void
1283 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1284 bool no_other_refs)
1285 {
1286 struct loop *nest, *aloop;
1287 VEC (data_reference_p, heap) *datarefs = NULL;
1288 VEC (ddr_p, heap) *dependences = NULL;
1289 struct mem_ref_group *gr;
1290 struct mem_ref *ref, *refb;
1291 VEC (loop_p, heap) *vloops = NULL;
1292 unsigned *loop_data_size;
1293 unsigned i, j, n;
1294 unsigned volume, dist, adist;
1295 HOST_WIDE_INT vol;
1296 data_reference_p dr;
1297 ddr_p dep;
1298
1299 if (loop->inner)
1300 return;
1301
1302   /* Find the outermost loop of the loop nest of LOOP (we require that
1303 there are no sibling loops inside the nest). */
1304 nest = loop;
1305 while (1)
1306 {
1307 aloop = loop_outer (nest);
1308
1309 if (aloop == current_loops->tree_root
1310 || aloop->inner->next)
1311 break;
1312
1313 nest = aloop;
1314 }
1315
1316 /* For each loop, determine the amount of data accessed in each iteration.
1317 We use this to estimate whether the reference is evicted from the
1318 cache before its reuse. */
1319 find_loop_nest (nest, &vloops);
1320 n = VEC_length (loop_p, vloops);
1321 loop_data_size = XNEWVEC (unsigned, n);
1322 volume = volume_of_references (refs);
1323 i = n;
1324 while (i-- != 0)
1325 {
1326 loop_data_size[i] = volume;
1327 /* Bound the volume by the L2 cache size, since above this bound,
1328 all dependence distances are equivalent. */
1329 if (volume > L2_CACHE_SIZE_BYTES)
1330 continue;
1331
1332 aloop = VEC_index (loop_p, vloops, i);
1333 vol = estimated_loop_iterations_int (aloop, false);
1334 if (vol < 0)
1335 vol = expected_loop_iterations (aloop);
1336 volume *= vol;
1337 }
1338
1339 /* Prepare the references in the form suitable for data dependence
1340 analysis. We ignore unanalyzable data references (the results
1341      are used just as a heuristic to estimate the temporality of the
1342 references, hence we do not need to worry about correctness). */
1343 for (gr = refs; gr; gr = gr->next)
1344 for (ref = gr->refs; ref; ref = ref->next)
1345 {
1346 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1347
1348 if (dr)
1349 {
1350 ref->reuse_distance = volume;
1351 dr->aux = ref;
1352 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1353 }
1354 else
1355 no_other_refs = false;
1356 }
1357
1358 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1359 {
1360 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1361 ref = (struct mem_ref *) dr->aux;
1362 if (ref->reuse_distance > dist)
1363 ref->reuse_distance = dist;
1364
1365 if (no_other_refs)
1366 ref->independent_p = true;
1367 }
1368
1369 compute_all_dependences (datarefs, &dependences, vloops, true);
1370
1371 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1372 {
1373 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1374 continue;
1375
1376 ref = (struct mem_ref *) DDR_A (dep)->aux;
1377 refb = (struct mem_ref *) DDR_B (dep)->aux;
1378
1379 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1380 || DDR_NUM_DIST_VECTS (dep) == 0)
1381 {
1382 /* If the dependence cannot be analyzed, assume that there might be
1383 a reuse. */
1384 dist = 0;
1385
1386 ref->independent_p = false;
1387 refb->independent_p = false;
1388 }
1389 else
1390 {
1391 /* The distance vectors are normalized to be always lexicographically
1392 positive, hence we cannot tell just from them whether DDR_A comes
1393 	     before DDR_B or vice versa.  However, this does not matter anyway --
1394 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1395 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1396 in cache (and marking it as nontemporal would not affect
1397 anything). */
1398
1399 dist = volume;
1400 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1401 {
1402 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1403 loop_data_size, n);
1404
1405 /* If this is a dependence in the innermost loop (i.e., the
1406 distances in all superloops are zero) and it is not
1407 the trivial self-dependence with distance zero, record that
1408 the references are not completely independent. */
1409 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1410 && (ref != refb
1411 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1412 {
1413 ref->independent_p = false;
1414 refb->independent_p = false;
1415 }
1416
1417 /* Ignore accesses closer than
1418 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1419 	     so that we use nontemporal prefetches e.g. if a single memory
1420 location is accessed several times in a single iteration of
1421 the loop. */
1422 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1423 continue;
1424
1425 if (adist < dist)
1426 dist = adist;
1427 }
1428 }
1429
1430 if (ref->reuse_distance > dist)
1431 ref->reuse_distance = dist;
1432 if (refb->reuse_distance > dist)
1433 refb->reuse_distance = dist;
1434 }
1435
1436 free_dependence_relations (dependences);
1437 free_data_refs (datarefs);
1438 free (loop_data_size);
1439
1440 if (dump_file && (dump_flags & TDF_DETAILS))
1441 {
1442 fprintf (dump_file, "Reuse distances:\n");
1443 for (gr = refs; gr; gr = gr->next)
1444 for (ref = gr->refs; ref; ref = ref->next)
1445 fprintf (dump_file, " ref %p distance %u\n",
1446 (void *) ref, ref->reuse_distance);
1447 }
1448 }
1449
1450 /* Issue prefetch instructions for array references in LOOP. Returns
1451 true if the LOOP was unrolled. */
1452
1453 static bool
1454 loop_prefetch_arrays (struct loop *loop)
1455 {
1456 struct mem_ref_group *refs;
1457 unsigned ahead, ninsns, time, unroll_factor;
1458 HOST_WIDE_INT est_niter;
1459 struct tree_niter_desc desc;
1460 bool unrolled = false, no_other_refs;
1461
1462 if (!maybe_hot_bb_p (loop->header))
1463 {
1464 if (dump_file && (dump_flags & TDF_DETAILS))
1465 fprintf (dump_file, " ignored (cold area)\n");
1466 return false;
1467 }
1468
1469 /* Step 1: gather the memory references. */
1470 refs = gather_memory_references (loop, &no_other_refs);
1471
1472 /* Step 2: estimate the reuse effects. */
1473 prune_by_reuse (refs);
1474
1475 if (!anything_to_prefetch_p (refs))
1476 goto fail;
1477
1478 determine_loop_nest_reuse (loop, refs, no_other_refs);
1479
1480 /* Step 3: determine the ahead and unroll factor. */
1481
1482 /* FIXME: the time should be weighted by the probabilities of the blocks in
1483 the loop body. */
1484 time = tree_num_loop_insns (loop, &eni_time_weights);
1485 ahead = (PREFETCH_LATENCY + time - 1) / time;
1486 est_niter = estimated_loop_iterations_int (loop, false);
1487
1488 /* The prefetches will run for AHEAD iterations of the original loop. Unless
1489 the loop rolls at least AHEAD times, prefetching the references does not
1490 make sense. */
1491 if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
1492 {
1493 if (dump_file && (dump_flags & TDF_DETAILS))
1494 fprintf (dump_file,
1495 "Not prefetching -- loop estimated to roll only %d times\n",
1496 (int) est_niter);
1497 goto fail;
1498 }
1499
1500 mark_nontemporal_stores (loop, refs);
1501
1502 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1503 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1504 est_niter);
1505 if (dump_file && (dump_flags & TDF_DETAILS))
1506 fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
1507
1508 /* Step 4: what to prefetch? */
1509 if (!schedule_prefetches (refs, unroll_factor, ahead))
1510 goto fail;
1511
1512 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1513 iterations so that we do not issue superfluous prefetches. */
1514 if (unroll_factor != 1)
1515 {
1516 tree_unroll_loop (loop, unroll_factor,
1517 single_dom_exit (loop), &desc);
1518 unrolled = true;
1519 }
1520
1521 /* Step 6: issue the prefetches. */
1522 issue_prefetches (refs, unroll_factor, ahead);
1523
1524 fail:
1525 release_mem_refs (refs);
1526 return unrolled;
1527 }
1528
1529 /* Issue prefetch instructions for array references in loops. */
1530
1531 unsigned int
1532 tree_ssa_prefetch_arrays (void)
1533 {
1534 loop_iterator li;
1535 struct loop *loop;
1536 bool unrolled = false;
1537 int todo_flags = 0;
1538
1539 if (!HAVE_prefetch
1540       /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1541 	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1542 	 of the processor costs and the i486 does not have prefetch, but
1543 	 -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
1544 || PREFETCH_BLOCK == 0)
1545 return 0;
1546
1547 if (dump_file && (dump_flags & TDF_DETAILS))
1548 {
1549 fprintf (dump_file, "Prefetching parameters:\n");
1550 fprintf (dump_file, " simultaneous prefetches: %d\n",
1551 SIMULTANEOUS_PREFETCHES);
1552 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1553 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1554 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1555 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1556 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1557 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1558 fprintf (dump_file, "\n");
1559 }
1560
1561 initialize_original_copy_tables ();
1562
1563 if (!built_in_decls[BUILT_IN_PREFETCH])
1564 {
1565 tree type = build_function_type (void_type_node,
1566 tree_cons (NULL_TREE,
1567 const_ptr_type_node,
1568 NULL_TREE));
1569 tree decl = add_builtin_function ("__builtin_prefetch", type,
1570 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1571 NULL, NULL_TREE);
1572 DECL_IS_NOVOPS (decl) = true;
1573 built_in_decls[BUILT_IN_PREFETCH] = decl;
1574 }
1575
1576   /* We assume that the cache line size is a power of two, so verify this
1577 here. */
1578 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1579
1580 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1581 {
1582 if (dump_file && (dump_flags & TDF_DETAILS))
1583 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1584
1585 unrolled |= loop_prefetch_arrays (loop);
1586
1587 if (dump_file && (dump_flags & TDF_DETAILS))
1588 fprintf (dump_file, "\n\n");
1589 }
1590
1591 if (unrolled)
1592 {
1593 scev_reset ();
1594 todo_flags |= TODO_cleanup_cfg;
1595 }
1596
1597 free_original_copy_tables ();
1598 return todo_flags;
1599 }