[gcc.git] / gcc / tree-ssa-loop-prefetch.c
1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "varray.h"
36 #include "expr.h"
37 #include "tree-pass.h"
38 #include "ggc.h"
39 #include "insn-config.h"
40 #include "recog.h"
41 #include "hashtab.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "toplev.h"
45 #include "params.h"
46 #include "langhooks.h"
47 #include "tree-inline.h"
48 #include "tree-data-ref.h"
49 #include "optabs.h"
50
51 /* This pass inserts prefetch instructions to optimize cache usage during
52 accesses to arrays in loops. It processes loops sequentially and:
53
54 1) Gathers all memory references in the single loop.
55 2) For each of the references it decides when it is profitable to prefetch
56 it. To do so, we evaluate the reuse among the accesses and determine
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example
61 (assuming cache line size is 64 bytes, char has size 1 byte and there
62 is no hardware sequential prefetch):
63
64 char *a;
65 for (i = 0; i < max; i++)
66 {
67 a[255] = ...; (0)
68 a[i] = ...; (1)
69 a[i + 64] = ...; (2)
70 a[16*i] = ...; (3)
71 a[187*i] = ...; (4)
72 a[187*i + 50] = ...; (5)
73 }
74
75 (0) obviously has PREFETCH_BEFORE 1
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
77 location 64 iterations before it, and PREFETCH_MOD 64 (since
78 it hits the same cache line otherwise).
79 (2) has PREFETCH_MOD 64
80 (3) has PREFETCH_MOD 4
81 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
82 the cache line accessed by (4) is the same as the one accessed by (5)
83 with probability only 7/32.
84 (5) has PREFETCH_MOD 1 as well.
85
86 Additionally, we use data dependence analysis to determine for each
87 reference the distance till the first reuse; this information is used
88 to determine the temporality of the issued prefetch instruction.
89
90 3) We determine how much ahead we need to prefetch. The number of
91 iterations needed is time to fetch / time spent in one iteration of
92 the loop. The problem is that we do not know either of these values,
93 so we just make a heuristic guess based on a magic (possibly)
94 target-specific constant and size of the loop.
95
96 4) Determine which of the references we prefetch. We take into account
97 that there is a maximum number of simultaneous prefetches (provided
98 by the machine description). We issue as many prefetches as possible
99 while still within this bound (starting with those with lowest
100 prefetch_mod, since they are responsible for most of the cache
101 misses).
102
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
105 prefetching nonaccessed memory.
106 TODO -- actually implement peeling.
107
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the
109 prefetch instructions with guards in cases where 5) was not sufficient
110 to satisfy the constraints?
111
112 The function is_loop_prefetching_profitable() implements a cost model
113 to determine if prefetching is profitable for a given loop. The cost
114 model has two heuristics:
115 1. A heuristic that determines whether the given loop has enough CPU
116 ops that can be overlapped with cache missing memory ops.
117 If not, the loop won't benefit from prefetching. This is implemented
118 by requiring the ratio between the instruction count and the mem ref
119 count to be above a certain minimum.
120 2. A heuristic that disables prefetching in a loop with an unknown trip
121 count if the prefetching cost is above a certain limit. The relative
122 prefetching cost is estimated by taking the ratio between the
123 prefetch count and the total instruction count (this models the I-cache
124 cost).
125 The limits used in these heuristics are defined as parameters with
126 reasonable default values. Machine-specific default values will be
127 added later.
128
129 Some other TODO:
130 -- write and use more general reuse analysis (that could also be used
131 in other cache-aimed loop optimizations)
132 -- make it behave sanely together with the prefetches given by the user
133 (now we just ignore them; at the very least we should avoid
134 optimizing loops in which the user put their own prefetches)
135 -- we assume cache line size alignment of arrays; this could be
136 improved. */
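
/* An illustration of step 3 with made-up numbers (they are not taken from
   any particular target): if PREFETCH_LATENCY is 200 and one iteration of
   the loop is estimated to cost 25 in the same time units, each prefetch
   is issued ceil (200 / 25) = 8 iterations ahead of the access it covers.  */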
137
138 /* Magic constants follow. These should be replaced by machine specific
139 numbers. */
140
141 /* True if write can be prefetched by a read prefetch. */
142
143 #ifndef WRITE_CAN_USE_READ_PREFETCH
144 #define WRITE_CAN_USE_READ_PREFETCH 1
145 #endif
146
147 /* True if read can be prefetched by a write prefetch. */
148
149 #ifndef READ_CAN_USE_WRITE_PREFETCH
150 #define READ_CAN_USE_WRITE_PREFETCH 0
151 #endif
152
153 /* The size of the block loaded by a single prefetch. Usually, this is
154 the same as cache line size (at the moment, we only consider one level
155 of cache hierarchy). */
156
157 #ifndef PREFETCH_BLOCK
158 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
159 #endif
160
161 /* Do we have forward hardware sequential prefetching? */
162
163 #ifndef HAVE_FORWARD_PREFETCH
164 #define HAVE_FORWARD_PREFETCH 0
165 #endif
166
167 /* Do we have backward hardware sequential prefetching? */
168
169 #ifndef HAVE_BACKWARD_PREFETCH
170 #define HAVE_BACKWARD_PREFETCH 0
171 #endif
172
173 /* In some cases we are only able to determine that there is a certain
174 probability that the two accesses hit the same cache line. In this
175 case, we issue the prefetches for both of them if this probability
176 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
177
178 #ifndef ACCEPTABLE_MISS_RATE
179 #define ACCEPTABLE_MISS_RATE 50
180 #endif
181
182 #ifndef HAVE_prefetch
183 #define HAVE_prefetch 0
184 #endif
185
186 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
187 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
188
189 /* We consider a memory access nontemporal if it is not reused sooner than
190 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
191 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
192 so that we use nontemporal prefetches e.g. if a single memory location
193 is accessed several times in a single iteration of the loop. */
194 #define NONTEMPORAL_FRACTION 16
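
/* For instance, assuming a hypothetical 32 kB L1 cache and the fraction
   above, reuses within 32768 / 16 = 2048 bytes of accessed data are ignored
   when classifying a reference as nontemporal.  */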
195
196 /* In case we have to emit a memory fence instruction after the loop that
197 uses nontemporal stores, this defines the builtin to use. */
198
199 #ifndef FENCE_FOLLOWING_MOVNT
200 #define FENCE_FOLLOWING_MOVNT NULL_TREE
201 #endif
202
203 /* The group of references between which reuse may occur. */
204
205 struct mem_ref_group
206 {
207 tree base; /* Base of the reference. */
208 HOST_WIDE_INT step; /* Step of the reference. */
209 struct mem_ref *refs; /* References in the group. */
210 struct mem_ref_group *next; /* Next group of references. */
211 };
212
213 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
214
215 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
216
217 /* The memory reference. */
218
219 struct mem_ref
220 {
221 gimple stmt; /* Statement in which the reference appears. */
222 tree mem; /* The reference. */
223 HOST_WIDE_INT delta; /* Constant offset of the reference. */
224 struct mem_ref_group *group; /* The group of references it belongs to. */
225 unsigned HOST_WIDE_INT prefetch_mod;
226 /* Prefetch only each PREFETCH_MOD-th
227 iteration. */
228 unsigned HOST_WIDE_INT prefetch_before;
229 /* Prefetch only first PREFETCH_BEFORE
230 iterations. */
231 unsigned reuse_distance; /* The amount of data accessed before the first
232 reuse of this value. */
233 struct mem_ref *next; /* The next reference in the group. */
234 unsigned write_p : 1; /* Is it a write? */
235 unsigned independent_p : 1; /* True if the reference is independent of
236 all other references inside the loop. */
237 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
238 unsigned storent_p : 1; /* True if we changed the store to a
239 nontemporal one. */
240 };
241
242 /* Dumps information about reference REF to FILE. */
243
244 static void
245 dump_mem_ref (FILE *file, struct mem_ref *ref)
246 {
247 fprintf (file, "Reference %p:\n", (void *) ref);
248
249 fprintf (file, " group %p (base ", (void *) ref->group);
250 print_generic_expr (file, ref->group->base, TDF_SLIM);
251 fprintf (file, ", step ");
252 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
253 fprintf (file, ")\n");
254
255 fprintf (file, " delta ");
256 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
257 fprintf (file, "\n");
258
259 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
260
261 fprintf (file, "\n");
262 }
263
264 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
265 exist. */
266
267 static struct mem_ref_group *
268 find_or_create_group (struct mem_ref_group **groups, tree base,
269 HOST_WIDE_INT step)
270 {
271 struct mem_ref_group *group;
272
273 for (; *groups; groups = &(*groups)->next)
274 {
275 if ((*groups)->step == step
276 && operand_equal_p ((*groups)->base, base, 0))
277 return *groups;
278
279 /* Keep the list of groups sorted by decreasing step. */
280 if ((*groups)->step < step)
281 break;
282 }
283
284 group = XNEW (struct mem_ref_group);
285 group->base = base;
286 group->step = step;
287 group->refs = NULL;
288 group->next = *groups;
289 *groups = group;
290
291 return group;
292 }
293
294 /* Records a memory reference MEM in GROUP with offset DELTA and write status
295 WRITE_P. The reference occurs in statement STMT. */
296
297 static void
298 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
299 HOST_WIDE_INT delta, bool write_p)
300 {
301 struct mem_ref **aref;
302
303 /* Do not record the same address twice. */
304 for (aref = &group->refs; *aref; aref = &(*aref)->next)
305 {
306 /* A write reference may not be able to reuse a prefetch issued for a
307 read, or vice versa. */
308 if (!WRITE_CAN_USE_READ_PREFETCH
309 && write_p
310 && !(*aref)->write_p)
311 continue;
312 if (!READ_CAN_USE_WRITE_PREFETCH
313 && !write_p
314 && (*aref)->write_p)
315 continue;
316
317 if ((*aref)->delta == delta)
318 return;
319 }
320
321 (*aref) = XNEW (struct mem_ref);
322 (*aref)->stmt = stmt;
323 (*aref)->mem = mem;
324 (*aref)->delta = delta;
325 (*aref)->write_p = write_p;
326 (*aref)->prefetch_before = PREFETCH_ALL;
327 (*aref)->prefetch_mod = 1;
328 (*aref)->reuse_distance = 0;
329 (*aref)->issue_prefetch_p = false;
330 (*aref)->group = group;
331 (*aref)->next = NULL;
332 (*aref)->independent_p = false;
333 (*aref)->storent_p = false;
334
335 if (dump_file && (dump_flags & TDF_DETAILS))
336 dump_mem_ref (dump_file, *aref);
337 }
338
339 /* Release memory references in GROUPS. */
340
341 static void
342 release_mem_refs (struct mem_ref_group *groups)
343 {
344 struct mem_ref_group *next_g;
345 struct mem_ref *ref, *next_r;
346
347 for (; groups; groups = next_g)
348 {
349 next_g = groups->next;
350 for (ref = groups->refs; ref; ref = next_r)
351 {
352 next_r = ref->next;
353 free (ref);
354 }
355 free (groups);
356 }
357 }
358
359 /* A structure used to pass arguments to idx_analyze_ref. */
360
361 struct ar_data
362 {
363 struct loop *loop; /* Loop of the reference. */
364 gimple stmt; /* Statement of the reference. */
365 HOST_WIDE_INT *step; /* Step of the memory reference. */
366 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
367 };
368
369 /* Analyzes a single INDEX of a memory reference to obtain information
370 described at analyze_ref. Callback for for_each_index. */
371
372 static bool
373 idx_analyze_ref (tree base, tree *index, void *data)
374 {
375 struct ar_data *ar_data = (struct ar_data *) data;
376 tree ibase, step, stepsize;
377 HOST_WIDE_INT istep, idelta = 0, imult = 1;
378 affine_iv iv;
379
380 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
381 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
382 return false;
383
384 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
385 *index, &iv, false))
386 return false;
387 ibase = iv.base;
388 step = iv.step;
389
390 if (!cst_and_fits_in_hwi (step))
391 return false;
392 istep = int_cst_value (step);
393
394 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
395 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
396 {
397 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
398 ibase = TREE_OPERAND (ibase, 0);
399 }
400 if (cst_and_fits_in_hwi (ibase))
401 {
402 idelta += int_cst_value (ibase);
403 ibase = build_int_cst (TREE_TYPE (ibase), 0);
404 }
405
406 if (TREE_CODE (base) == ARRAY_REF)
407 {
408 stepsize = array_ref_element_size (base);
409 if (!cst_and_fits_in_hwi (stepsize))
410 return false;
411 imult = int_cst_value (stepsize);
412
413 istep *= imult;
414 idelta *= imult;
415 }
416
417 *ar_data->step += istep;
418 *ar_data->delta += idelta;
419 *index = ibase;
420
421 return true;
422 }
423
424 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
425 STEP are integer constants and iter is the number of iterations of LOOP. The
426 reference occurs in statement STMT. Strips nonaddressable component
427 references from REF_P. */
428
429 static bool
430 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
431 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
432 gimple stmt)
433 {
434 struct ar_data ar_data;
435 tree off;
436 HOST_WIDE_INT bit_offset;
437 tree ref = *ref_p;
438
439 *step = 0;
440 *delta = 0;
441
442 /* First strip off the component references. Ignore bitfields. */
443 if (TREE_CODE (ref) == COMPONENT_REF
444 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
445 ref = TREE_OPERAND (ref, 0);
446
447 *ref_p = ref;
448
449 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
450 {
451 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
452 bit_offset = TREE_INT_CST_LOW (off);
453 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
454
455 *delta += bit_offset / BITS_PER_UNIT;
456 }
457
458 *base = unshare_expr (ref);
459 ar_data.loop = loop;
460 ar_data.stmt = stmt;
461 ar_data.step = step;
462 ar_data.delta = delta;
463 return for_each_index (base, idx_analyze_ref, &ar_data);
464 }
465
466 /* Record a memory reference REF to the list REFS. The reference occurs in
467 LOOP in statement STMT and is a write if WRITE_P. Returns true if the
468 reference was recorded, false otherwise. */
469
470 static bool
471 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
472 tree ref, bool write_p, gimple stmt)
473 {
474 tree base;
475 HOST_WIDE_INT step, delta;
476 struct mem_ref_group *agrp;
477
478 if (get_base_address (ref) == NULL)
479 return false;
480
481 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
482 return false;
483
484 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
485 are integer constants. */
486 agrp = find_or_create_group (refs, base, step);
487 record_ref (agrp, stmt, ref, delta, write_p);
488
489 return true;
490 }
491
492 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
493 true if there are no other memory references inside the loop. REF_COUNT is set to the number of memory references found. */
494
495 static struct mem_ref_group *
496 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
497 {
498 basic_block *body = get_loop_body_in_dom_order (loop);
499 basic_block bb;
500 unsigned i;
501 gimple_stmt_iterator bsi;
502 gimple stmt;
503 tree lhs, rhs;
504 struct mem_ref_group *refs = NULL;
505
506 *no_other_refs = true;
507 *ref_count = 0;
508
509 /* Scan the loop body in order, so that earlier references precede
510 later ones. */
511 for (i = 0; i < loop->num_nodes; i++)
512 {
513 bb = body[i];
514 if (bb->loop_father != loop)
515 continue;
516
517 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
518 {
519 stmt = gsi_stmt (bsi);
520
521 if (gimple_code (stmt) != GIMPLE_ASSIGN)
522 {
523 if (gimple_vuse (stmt)
524 || (is_gimple_call (stmt)
525 && !(gimple_call_flags (stmt) & ECF_CONST)))
526 *no_other_refs = false;
527 continue;
528 }
529
530 lhs = gimple_assign_lhs (stmt);
531 rhs = gimple_assign_rhs1 (stmt);
532
533 if (REFERENCE_CLASS_P (rhs))
534 {
535 *no_other_refs &= gather_memory_references_ref (loop, &refs,
536 rhs, false, stmt);
537 *ref_count += 1;
538 }
539 if (REFERENCE_CLASS_P (lhs))
540 {
541 *no_other_refs &= gather_memory_references_ref (loop, &refs,
542 lhs, true, stmt);
543 *ref_count += 1;
544 }
545 }
546 }
547 free (body);
548
549 return refs;
550 }
551
552 /* Prune the prefetch candidate REF using the self-reuse. */
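
/* For illustration, assuming the 64-byte PREFETCH_BLOCK from the example at
   the top of the file and no hardware sequential prefetch: a reference with
   step 1 gets PREFETCH_MOD 64 and one with step 16 gets PREFETCH_MOD 4,
   matching references (2) and (3) in that example.  */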
553
554 static void
555 prune_ref_by_self_reuse (struct mem_ref *ref)
556 {
557 HOST_WIDE_INT step = ref->group->step;
558 bool backward = step < 0;
559
560 if (step == 0)
561 {
562 /* Prefetch references to invariant address just once. */
563 ref->prefetch_before = 1;
564 return;
565 }
566
567 if (backward)
568 step = -step;
569
570 if (step > PREFETCH_BLOCK)
571 return;
572
573 if ((backward && HAVE_BACKWARD_PREFETCH)
574 || (!backward && HAVE_FORWARD_PREFETCH))
575 {
576 ref->prefetch_before = 1;
577 return;
578 }
579
580 ref->prefetch_mod = PREFETCH_BLOCK / step;
581 }
582
583 /* Divides X by BY, rounding down. */
584
585 static HOST_WIDE_INT
586 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
587 {
588 gcc_assert (by > 0);
589
590 if (x >= 0)
591 return x / by;
592 else
593 return (x + by - 1) / by;
594 }
595
596 /* Prune the prefetch candidate REF using the reuse with BY.
597 If BY_IS_BEFORE is true, BY is before REF in the loop. */
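
/* A worked example of the step > PREFETCH_BLOCK case below, using references
   (4) and (5) from the example at the top of the file (step 187, delta 50,
   PREFETCH_BLOCK 64): 187 is odd, so no reduction takes place; the candidate
   PREFETCH_BEFORE is 50 / 187 = 0 with remainder 50, which is far above the
   threshold 64 * ACCEPTABLE_MISS_RATE / 1000 = 3, and the following iteration
   (remainder 187 - 50 = 137) fails as well, so PREFETCH_BEFORE of (4) is left
   unchanged.  */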
598
599 static void
600 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
601 bool by_is_before)
602 {
603 HOST_WIDE_INT step = ref->group->step;
604 bool backward = step < 0;
605 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
606 HOST_WIDE_INT delta = delta_b - delta_r;
607 HOST_WIDE_INT hit_from;
608 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
609
610 if (delta == 0)
611 {
612 /* If the references have the same address, prefetch only the
613 former. */
614 if (by_is_before)
615 ref->prefetch_before = 0;
616
617 return;
618 }
619
620 if (!step)
621 {
622 /* If the reference addresses are invariant and fall into the
623 same cache line, prefetch just the first one. */
624 if (!by_is_before)
625 return;
626
627 if (ddown (ref->delta, PREFETCH_BLOCK)
628 != ddown (by->delta, PREFETCH_BLOCK))
629 return;
630
631 ref->prefetch_before = 0;
632 return;
633 }
634
635 /* Only prune the reference that is behind in the array. */
636 if (backward)
637 {
638 if (delta > 0)
639 return;
640
641 /* Transform the data so that we may assume that the accesses
642 are forward. */
643 delta = - delta;
644 step = -step;
645 delta_r = PREFETCH_BLOCK - 1 - delta_r;
646 delta_b = PREFETCH_BLOCK - 1 - delta_b;
647 }
648 else
649 {
650 if (delta < 0)
651 return;
652 }
653
654 /* Check whether the two references are likely to hit the same cache
655 line, and how far apart the iterations in which this occurs are from
656 each other. */
657
658 if (step <= PREFETCH_BLOCK)
659 {
660 /* The accesses are sure to meet. Let us check when. */
661 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
662 prefetch_before = (hit_from - delta_r + step - 1) / step;
663
664 if (prefetch_before < ref->prefetch_before)
665 ref->prefetch_before = prefetch_before;
666
667 return;
668 }
669
670 /* A more complicated case. First let us ensure that the cache line size
671 and step are coprime (here we assume that PREFETCH_BLOCK is a power
672 of two). */
673 prefetch_block = PREFETCH_BLOCK;
674 while ((step & 1) == 0
675 && prefetch_block > 1)
676 {
677 step >>= 1;
678 prefetch_block >>= 1;
679 delta >>= 1;
680 }
681
682 /* Now step > prefetch_block, and step and prefetch_block are coprime.
683 Determine the probability that the accesses hit the same cache line. */
684
685 prefetch_before = delta / step;
686 delta %= step;
687 if ((unsigned HOST_WIDE_INT) delta
688 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
689 {
690 if (prefetch_before < ref->prefetch_before)
691 ref->prefetch_before = prefetch_before;
692
693 return;
694 }
695
696 /* Try also the following iteration. */
697 prefetch_before++;
698 delta = step - delta;
699 if ((unsigned HOST_WIDE_INT) delta
700 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
701 {
702 if (prefetch_before < ref->prefetch_before)
703 ref->prefetch_before = prefetch_before;
704
705 return;
706 }
707
708 /* REF probably does not reuse BY. */
709 return;
710 }
711
712 /* Prune the prefetch candidate REF using the reuses with other references
713 in REFS. */
714
715 static void
716 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
717 {
718 struct mem_ref *prune_by;
719 bool before = true;
720
721 prune_ref_by_self_reuse (ref);
722
723 for (prune_by = refs; prune_by; prune_by = prune_by->next)
724 {
725 if (prune_by == ref)
726 {
727 before = false;
728 continue;
729 }
730
731 if (!WRITE_CAN_USE_READ_PREFETCH
732 && ref->write_p
733 && !prune_by->write_p)
734 continue;
735 if (!READ_CAN_USE_WRITE_PREFETCH
736 && !ref->write_p
737 && prune_by->write_p)
738 continue;
739
740 prune_ref_by_group_reuse (ref, prune_by, before);
741 }
742 }
743
744 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
745
746 static void
747 prune_group_by_reuse (struct mem_ref_group *group)
748 {
749 struct mem_ref *ref_pruned;
750
751 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
752 {
753 prune_ref_by_reuse (ref_pruned, group->refs);
754
755 if (dump_file && (dump_flags & TDF_DETAILS))
756 {
757 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
758
759 if (ref_pruned->prefetch_before == PREFETCH_ALL
760 && ref_pruned->prefetch_mod == 1)
761 fprintf (dump_file, " no restrictions");
762 else if (ref_pruned->prefetch_before == 0)
763 fprintf (dump_file, " do not prefetch");
764 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
765 fprintf (dump_file, " prefetch once");
766 else
767 {
768 if (ref_pruned->prefetch_before != PREFETCH_ALL)
769 {
770 fprintf (dump_file, " prefetch before ");
771 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
772 ref_pruned->prefetch_before);
773 }
774 if (ref_pruned->prefetch_mod != 1)
775 {
776 fprintf (dump_file, " prefetch mod ");
777 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
778 ref_pruned->prefetch_mod);
779 }
780 }
781 fprintf (dump_file, "\n");
782 }
783 }
784 }
785
786 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
787
788 static void
789 prune_by_reuse (struct mem_ref_group *groups)
790 {
791 for (; groups; groups = groups->next)
792 prune_group_by_reuse (groups);
793 }
794
795 /* Returns true if we should issue prefetch for REF. */
796
797 static bool
798 should_issue_prefetch_p (struct mem_ref *ref)
799 {
800 /* For now do not issue prefetches that would be useful only in the
801 first few iterations. */
802 if (ref->prefetch_before != PREFETCH_ALL)
803 return false;
804
805 /* Do not prefetch nontemporal stores. */
806 if (ref->storent_p)
807 return false;
808
809 return true;
810 }
811
812 /* Decide which of the prefetch candidates in GROUPS to prefetch.
813 AHEAD is the number of iterations to prefetch ahead (which corresponds
814 to the number of simultaneous instances of one prefetch running at a
815 time). UNROLL_FACTOR is the factor by which the loop is going to be
816 unrolled. Returns true if there is anything to prefetch. */
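
/* A sketch of the slot accounting with made-up numbers: if AHEAD is 8 and
   UNROLL_FACTOR is 4, each prefetch instruction occupies (8 + 4/2) / 4 = 2
   of the SIMULTANEOUS_PREFETCHES slots, and a reference with PREFETCH_MOD 2
   needs ceil (4 / 2) = 2 prefetch instructions per unrolled body, i.e. 4
   slots.  */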
817
818 static bool
819 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
820 unsigned ahead)
821 {
822 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
823 unsigned slots_per_prefetch;
824 struct mem_ref *ref;
825 bool any = false;
826
827 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
828 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
829
830 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
831 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
832 it will need a prefetch slot. */
833 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
834 if (dump_file && (dump_flags & TDF_DETAILS))
835 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
836 slots_per_prefetch);
837
838 /* For now we just take memory references one by one and issue
839 prefetches for as many as possible. The groups are sorted
840 starting with the largest step, since the references with
841 large step are more likely to cause many cache misses. */
842
843 for (; groups; groups = groups->next)
844 for (ref = groups->refs; ref; ref = ref->next)
845 {
846 if (!should_issue_prefetch_p (ref))
847 continue;
848
849 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
850 and we unroll the loop UNROLL_FACTOR times, we need to insert
851 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
852 iteration. */
853 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
854 / ref->prefetch_mod);
855 prefetch_slots = n_prefetches * slots_per_prefetch;
856
857 /* If more than half of the prefetches would be lost anyway, do not
858 issue the prefetch. */
859 if (2 * remaining_prefetch_slots < prefetch_slots)
860 continue;
861
862 ref->issue_prefetch_p = true;
863
864 if (remaining_prefetch_slots <= prefetch_slots)
865 return true;
866 remaining_prefetch_slots -= prefetch_slots;
867 any = true;
868 }
869
870 return any;
871 }
872
873 /* Estimate the number of prefetches in the given GROUPS. */
874
875 static int
876 estimate_prefetch_count (struct mem_ref_group *groups)
877 {
878 struct mem_ref *ref;
879 int prefetch_count = 0;
880
881 for (; groups; groups = groups->next)
882 for (ref = groups->refs; ref; ref = ref->next)
883 if (should_issue_prefetch_p (ref))
884 prefetch_count++;
885
886 return prefetch_count;
887 }
888
889 /* Issue prefetches for the reference REF into loop as decided before.
890 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
891 is the factor by which LOOP was unrolled. */
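
/* For illustration with hypothetical values: for a read with group step 8,
   PREFETCH_MOD 1, UNROLL_FACTOR 2 and AHEAD 10, two calls are inserted
   before the statement, roughly

     __builtin_prefetch (addr_base + 80, 0, 3);
     __builtin_prefetch (addr_base + 88, 0, 3);

   where the last argument would be 0 instead of 3 for a nontemporal
   reference.  */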
892
893 static void
894 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
895 {
896 HOST_WIDE_INT delta;
897 tree addr, addr_base, write_p, local;
898 gimple prefetch;
899 gimple_stmt_iterator bsi;
900 unsigned n_prefetches, ap;
901 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
902
903 if (dump_file && (dump_flags & TDF_DETAILS))
904 fprintf (dump_file, "Issued%s prefetch for %p.\n",
905 nontemporal ? " nontemporal" : "",
906 (void *) ref);
907
908 bsi = gsi_for_stmt (ref->stmt);
909
910 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
911 / ref->prefetch_mod);
912 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
913 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
914 true, NULL, true, GSI_SAME_STMT);
915 write_p = ref->write_p ? integer_one_node : integer_zero_node;
916 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
917
918 for (ap = 0; ap < n_prefetches; ap++)
919 {
920 /* Determine the address to prefetch. */
921 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
922 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
923 addr_base, size_int (delta));
924 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
925 true, GSI_SAME_STMT);
926
927 /* Create the prefetch instruction. */
928 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
929 3, addr, write_p, local);
930 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
931 }
932 }
933
934 /* Issue prefetches for the references in GROUPS into loop as decided before.
935 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
936 factor by which LOOP was unrolled. */
937
938 static void
939 issue_prefetches (struct mem_ref_group *groups,
940 unsigned unroll_factor, unsigned ahead)
941 {
942 struct mem_ref *ref;
943
944 for (; groups; groups = groups->next)
945 for (ref = groups->refs; ref; ref = ref->next)
946 if (ref->issue_prefetch_p)
947 issue_prefetch_ref (ref, unroll_factor, ahead);
948 }
949
950 /* Returns true if REF is a memory write for which a nontemporal store insn
951 can be used. */
952
953 static bool
954 nontemporal_store_p (struct mem_ref *ref)
955 {
956 enum machine_mode mode;
957 enum insn_code code;
958
959 /* REF must be a write that is not reused. We require it to be independent
960 of all other memory references in the loop, as the nontemporal stores may
961 be reordered with respect to other memory references. */
962 if (!ref->write_p
963 || !ref->independent_p
964 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
965 return false;
966
967 /* Check that we have the storent instruction for the mode. */
968 mode = TYPE_MODE (TREE_TYPE (ref->mem));
969 if (mode == BLKmode)
970 return false;
971
972 code = optab_handler (storent_optab, mode)->insn_code;
973 return code != CODE_FOR_nothing;
974 }
975
976 /* If REF is a nontemporal store, we mark the corresponding modify statement
977 and return true. Otherwise, we return false. */
978
979 static bool
980 mark_nontemporal_store (struct mem_ref *ref)
981 {
982 if (!nontemporal_store_p (ref))
983 return false;
984
985 if (dump_file && (dump_flags & TDF_DETAILS))
986 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
987 (void *) ref);
988
989 gimple_assign_set_nontemporal_move (ref->stmt, true);
990 ref->storent_p = true;
991
992 return true;
993 }
994
995 /* Issue a memory fence instruction after LOOP. */
996
997 static void
998 emit_mfence_after_loop (struct loop *loop)
999 {
1000 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1001 edge exit;
1002 gimple call;
1003 gimple_stmt_iterator bsi;
1004 unsigned i;
1005
1006 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1007 {
1008 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1009
1010 if (!single_pred_p (exit->dest)
1011 /* If possible, we prefer not to insert the fence on other paths
1012 in cfg. */
1013 && !(exit->flags & EDGE_ABNORMAL))
1014 split_loop_exit_edge (exit);
1015 bsi = gsi_after_labels (exit->dest);
1016
1017 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1018 mark_virtual_ops_for_renaming (call);
1019 }
1020
1021 VEC_free (edge, heap, exits);
1022 update_ssa (TODO_update_ssa_only_virtuals);
1023 }
1024
1025 /* Returns true if we can use storent in loop, false otherwise. */
1026
1027 static bool
1028 may_use_storent_in_loop_p (struct loop *loop)
1029 {
1030 bool ret = true;
1031
1032 if (loop->inner != NULL)
1033 return false;
1034
1035 /* If we must issue a mfence insn after using storent, check that there
1036 is a suitable place for it at each of the loop exits. */
1037 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1038 {
1039 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1040 unsigned i;
1041 edge exit;
1042
1043 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1044 if ((exit->flags & EDGE_ABNORMAL)
1045 && exit->dest == EXIT_BLOCK_PTR)
1046 ret = false;
1047
1048 VEC_free (edge, heap, exits);
1049 }
1050
1051 return ret;
1052 }
1053
1054 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1055 references in the loop. */
1056
1057 static void
1058 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1059 {
1060 struct mem_ref *ref;
1061 bool any = false;
1062
1063 if (!may_use_storent_in_loop_p (loop))
1064 return;
1065
1066 for (; groups; groups = groups->next)
1067 for (ref = groups->refs; ref; ref = ref->next)
1068 any |= mark_nontemporal_store (ref);
1069
1070 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1071 emit_mfence_after_loop (loop);
1072 }
1073
1074 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1075 this is the case, fills in DESC with the description of the number of
1076 iterations. */
1077
1078 static bool
1079 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1080 unsigned factor)
1081 {
1082 if (!can_unroll_loop_p (loop, factor, desc))
1083 return false;
1084
1085 /* We only consider loops without control flow for unrolling. This is not
1086 a hard restriction -- tree_unroll_loop works with arbitrary loops
1087 as well; but the unrolling/prefetching is usually more profitable for
1088 loops consisting of a single basic block, and we want to limit the
1089 code growth. */
1090 if (loop->num_nodes > 2)
1091 return false;
1092
1093 return true;
1094 }
1095
1096 /* Determine the factor by which to unroll LOOP, from the information
1097 contained in the list of memory references REFS. The description of the
1098 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1099 insns of the LOOP. EST_NITER is the estimated number of iterations of
1100 the loop, or -1 if no estimate is available. */
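
/* For example, if the remaining prefetch candidates have PREFETCH_MOD 4 and
   64, the factor grows to lcm (4, 64) = 64 as long as 64 does not exceed the
   bound computed below; with a bound of, say, 16 it stays at 4.  */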
1101
1102 static unsigned
1103 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1104 unsigned ninsns, struct tree_niter_desc *desc,
1105 HOST_WIDE_INT est_niter)
1106 {
1107 unsigned upper_bound;
1108 unsigned nfactor, factor, mod_constraint;
1109 struct mem_ref_group *agp;
1110 struct mem_ref *ref;
1111
1112 /* First check whether the loop is not too large to unroll. We ignore
1113 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1114 from unrolling them enough to make exactly one cache line covered by each
1115 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1116 us from unrolling the loops too many times in cases where we only expect
1117 gains from better scheduling and decreasing loop overhead, which is not
1118 the case here. */
1119 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1120
1121 /* If we unrolled the loop more times than it iterates, the unrolled version
1122 of the loop would never be entered. */
1123 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1124 upper_bound = est_niter;
1125
1126 if (upper_bound <= 1)
1127 return 1;
1128
1129 /* Choose the factor so that we may prefetch each cache line just once,
1130 but bound the unrolling by UPPER_BOUND. */
1131 factor = 1;
1132 for (agp = refs; agp; agp = agp->next)
1133 for (ref = agp->refs; ref; ref = ref->next)
1134 if (should_issue_prefetch_p (ref))
1135 {
1136 mod_constraint = ref->prefetch_mod;
1137 nfactor = least_common_multiple (mod_constraint, factor);
1138 if (nfactor <= upper_bound)
1139 factor = nfactor;
1140 }
1141
1142 if (!should_unroll_loop_p (loop, desc, factor))
1143 return 1;
1144
1145 return factor;
1146 }
1147
1148 /* Returns the total volume of the memory references REFS, taking into account
1149 reuses in the innermost loop and cache line size. TODO -- we should also
1150 take into account reuses across the iterations of the loops in the loop
1151 nest. */
1152
1153 static unsigned
1154 volume_of_references (struct mem_ref_group *refs)
1155 {
1156 unsigned volume = 0;
1157 struct mem_ref_group *gr;
1158 struct mem_ref *ref;
1159
1160 for (gr = refs; gr; gr = gr->next)
1161 for (ref = gr->refs; ref; ref = ref->next)
1162 {
1163 /* Almost always reuses another value? */
1164 if (ref->prefetch_before != PREFETCH_ALL)
1165 continue;
1166
1167 /* If several iterations access the same cache line, use the size of
1168 the line divided by this number. Otherwise, a cache line is
1169 accessed in each iteration. TODO -- in the latter case, we should
1170 take the size of the reference into account, rounding it up to a
1171 multiple of the cache line size. */
1172 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1173 }
1174 return volume;
1175 }
1176
1177 /* Returns the volume of memory references accessed across VEC iterations of
1178 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1179 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1180
1181 static unsigned
1182 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1183 {
1184 unsigned i;
1185
1186 for (i = 0; i < n; i++)
1187 if (vec[i] != 0)
1188 break;
1189
1190 if (i == n)
1191 return 0;
1192
1193 gcc_assert (vec[i] > 0);
1194
1195 /* We ignore the parts of the distance vector in subloops, since usually
1196 the numbers of iterations are much smaller. */
1197 return loop_sizes[i] * vec[i];
1198 }
1199
1200 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1201 at the position corresponding to the loop of the step. N is the depth
1202 of the considered loop nest, and LOOP is its innermost loop. */
1203
1204 static void
1205 add_subscript_strides (tree access_fn, unsigned stride,
1206 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1207 {
1208 struct loop *aloop;
1209 tree step;
1210 HOST_WIDE_INT astep;
1211 unsigned min_depth = loop_depth (loop) - n;
1212
1213 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1214 {
1215 aloop = get_chrec_loop (access_fn);
1216 step = CHREC_RIGHT (access_fn);
1217 access_fn = CHREC_LEFT (access_fn);
1218
1219 if ((unsigned) loop_depth (aloop) <= min_depth)
1220 continue;
1221
1222 if (host_integerp (step, 0))
1223 astep = tree_low_cst (step, 0);
1224 else
1225 astep = L1_CACHE_LINE_SIZE;
1226
1227 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1228
1229 }
1230 }
1231
1232 /* Returns the volume of memory references accessed between two consecutive
1233 self-reuses of the reference DR. We consider the subscripts of DR in N
1234 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1235 loops. LOOP is the innermost loop of the current loop nest. */
1236
1237 static unsigned
1238 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1239 struct loop *loop)
1240 {
1241 tree stride, access_fn;
1242 HOST_WIDE_INT *strides, astride;
1243 VEC (tree, heap) *access_fns;
1244 tree ref = DR_REF (dr);
1245 unsigned i, ret = ~0u;
1246
1247 /* In the following example:
1248
1249 for (i = 0; i < N; i++)
1250 for (j = 0; j < N; j++)
1251 use (a[j][i]);
1252 the same cache line is accessed each N steps (except if the change from
1253 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1254 we cannot rely purely on the results of the data dependence analysis.
1255
1256 Instead, we compute the stride of the reference in each loop, and consider
1257 the innermost loop in which the stride is less than the cache line size. */
1258
1259 strides = XCNEWVEC (HOST_WIDE_INT, n);
1260 access_fns = DR_ACCESS_FNS (dr);
1261
1262 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1263 {
1264 /* Keep track of the reference corresponding to the subscript, so that we
1265 know its stride. */
1266 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1267 ref = TREE_OPERAND (ref, 0);
1268
1269 if (TREE_CODE (ref) == ARRAY_REF)
1270 {
1271 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1272 if (host_integerp (stride, 1))
1273 astride = tree_low_cst (stride, 1);
1274 else
1275 astride = L1_CACHE_LINE_SIZE;
1276
1277 ref = TREE_OPERAND (ref, 0);
1278 }
1279 else
1280 astride = 1;
1281
1282 add_subscript_strides (access_fn, astride, strides, n, loop);
1283 }
1284
1285 for (i = n; i-- > 0; )
1286 {
1287 unsigned HOST_WIDE_INT s;
1288
1289 s = strides[i] < 0 ? -strides[i] : strides[i];
1290
1291 if (s < (unsigned) L1_CACHE_LINE_SIZE
1292 && (loop_sizes[i]
1293 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1294 {
1295 ret = loop_sizes[i];
1296 break;
1297 }
1298 }
1299
1300 free (strides);
1301 return ret;
1302 }
1303
1304 /* Determines the distance till the first reuse of each reference in REFS
1305 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1306 memory references in the loop. */
1307
1308 static void
1309 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1310 bool no_other_refs)
1311 {
1312 struct loop *nest, *aloop;
1313 VEC (data_reference_p, heap) *datarefs = NULL;
1314 VEC (ddr_p, heap) *dependences = NULL;
1315 struct mem_ref_group *gr;
1316 struct mem_ref *ref, *refb;
1317 VEC (loop_p, heap) *vloops = NULL;
1318 unsigned *loop_data_size;
1319 unsigned i, j, n;
1320 unsigned volume, dist, adist;
1321 HOST_WIDE_INT vol;
1322 data_reference_p dr;
1323 ddr_p dep;
1324
1325 if (loop->inner)
1326 return;
1327
1328 /* Find the outermost loop of the loop nest of loop (we require that
1329 there are no sibling loops inside the nest). */
1330 nest = loop;
1331 while (1)
1332 {
1333 aloop = loop_outer (nest);
1334
1335 if (aloop == current_loops->tree_root
1336 || aloop->inner->next)
1337 break;
1338
1339 nest = aloop;
1340 }
1341
1342 /* For each loop, determine the amount of data accessed in each iteration.
1343 We use this to estimate whether the reference is evicted from the
1344 cache before its reuse. */
1345 find_loop_nest (nest, &vloops);
1346 n = VEC_length (loop_p, vloops);
1347 loop_data_size = XNEWVEC (unsigned, n);
1348 volume = volume_of_references (refs);
1349 i = n;
1350 while (i-- != 0)
1351 {
1352 loop_data_size[i] = volume;
1353 /* Bound the volume by the L2 cache size, since above this bound,
1354 all dependence distances are equivalent. */
1355 if (volume > L2_CACHE_SIZE_BYTES)
1356 continue;
1357
1358 aloop = VEC_index (loop_p, vloops, i);
1359 vol = estimated_loop_iterations_int (aloop, false);
1360 if (vol < 0)
1361 vol = expected_loop_iterations (aloop);
1362 volume *= vol;
1363 }
1364
1365 /* Prepare the references in the form suitable for data dependence
1366 analysis. We ignore unanalyzable data references (the results
1367 are used just as a heuristic to estimate the temporality of the
1368 references, hence we do not need to worry about correctness). */
1369 for (gr = refs; gr; gr = gr->next)
1370 for (ref = gr->refs; ref; ref = ref->next)
1371 {
1372 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1373
1374 if (dr)
1375 {
1376 ref->reuse_distance = volume;
1377 dr->aux = ref;
1378 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1379 }
1380 else
1381 no_other_refs = false;
1382 }
1383
1384 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1385 {
1386 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1387 ref = (struct mem_ref *) dr->aux;
1388 if (ref->reuse_distance > dist)
1389 ref->reuse_distance = dist;
1390
1391 if (no_other_refs)
1392 ref->independent_p = true;
1393 }
1394
1395 compute_all_dependences (datarefs, &dependences, vloops, true);
1396
1397 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1398 {
1399 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1400 continue;
1401
1402 ref = (struct mem_ref *) DDR_A (dep)->aux;
1403 refb = (struct mem_ref *) DDR_B (dep)->aux;
1404
1405 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1406 || DDR_NUM_DIST_VECTS (dep) == 0)
1407 {
1408 /* If the dependence cannot be analyzed, assume that there might be
1409 a reuse. */
1410 dist = 0;
1411
1412 ref->independent_p = false;
1413 refb->independent_p = false;
1414 }
1415 else
1416 {
1417 /* The distance vectors are normalized to be always lexicographically
1418 positive, hence we cannot tell just from them whether DDR_A comes
1419 before DDR_B or vice versa. However, it is not important,
1420 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1421 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1422 in cache (and marking it as nontemporal would not affect
1423 anything). */
1424
1425 dist = volume;
1426 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1427 {
1428 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1429 loop_data_size, n);
1430
1431 /* If this is a dependence in the innermost loop (i.e., the
1432 distances in all superloops are zero) and it is not
1433 the trivial self-dependence with distance zero, record that
1434 the references are not completely independent. */
1435 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1436 && (ref != refb
1437 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1438 {
1439 ref->independent_p = false;
1440 refb->independent_p = false;
1441 }
1442
1443 /* Ignore accesses closer than
1444 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1445 so that we use nontemporal prefetches e.g. if a single memory
1446 location is accessed several times in a single iteration of
1447 the loop. */
1448 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1449 continue;
1450
1451 if (adist < dist)
1452 dist = adist;
1453 }
1454 }
1455
1456 if (ref->reuse_distance > dist)
1457 ref->reuse_distance = dist;
1458 if (refb->reuse_distance > dist)
1459 refb->reuse_distance = dist;
1460 }
1461
1462 free_dependence_relations (dependences);
1463 free_data_refs (datarefs);
1464 free (loop_data_size);
1465
1466 if (dump_file && (dump_flags & TDF_DETAILS))
1467 {
1468 fprintf (dump_file, "Reuse distances:\n");
1469 for (gr = refs; gr; gr = gr->next)
1470 for (ref = gr->refs; ref; ref = ref->next)
1471 fprintf (dump_file, " ref %p distance %u\n",
1472 (void *) ref, ref->reuse_distance);
1473 }
1474 }
1475
1476 /* Do a cost-benefit analysis to determine if prefetching is profitable
1477 for the current loop given the following parameters:
1478 AHEAD: the iteration ahead distance,
1479 EST_NITER: the estimated trip count,
1480 NINSNS: estimated number of instructions in the loop,
1481 PREFETCH_COUNT: an estimate of the number of prefetches
1482 MEM_REF_COUNT: total number of memory references in the loop. */
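
/* A sketch with hypothetical numbers: a loop with NINSNS 40, MEM_REF_COUNT 5
   and PREFETCH_COUNT 4 has an instruction-to-memory ratio of 8 and an
   instruction-to-prefetch ratio of 10; prefetching is rejected if the former
   falls below PREFETCH_MIN_INSN_TO_MEM_RATIO or, when the trip count is
   unknown, if the latter falls below MIN_INSN_TO_PREFETCH_RATIO.  */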
1483
1484 static bool
1485 is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
1486 unsigned ninsns, unsigned prefetch_count,
1487 unsigned mem_ref_count)
1488 {
1489 int insn_to_mem_ratio, insn_to_prefetch_ratio;
1490
1491 if (mem_ref_count == 0)
1492 return false;
1493
1494 /* Prefetching improves performance by overlapping cache missing
1495 memory accesses with CPU operations. If the loop does not have
1496 enough CPU operations to overlap with memory operations, prefetching
1497 won't give a significant benefit. One approximate way of checking
1498 this is to require the ratio of instructions to memory references to
1499 be above a certain limit. This approximation works well in practice.
1500 TODO: Implement a more precise computation by estimating the time
1501 for each CPU or memory op in the loop. Time estimates for memory ops
1502 should account for cache misses. */
1503 insn_to_mem_ratio = ninsns / mem_ref_count;
1504
1505 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1506 return false;
1507
1508 /* Profitability of prefetching is highly dependent on the trip count.
1509 For a given AHEAD distance, the first AHEAD iterations do not benefit
1510 from prefetching, and the last AHEAD iterations execute useless
1511 prefetches. So, if the trip count is not large enough relative to AHEAD,
1512 prefetching may cause serious performance degradation. To avoid this
1513 problem when the trip count is not known at compile time, we
1514 conservatively skip loops with high prefetching costs. For now, only
1515 the I-cache cost is considered. The relative I-cache cost is estimated
1516 by taking the ratio between the number of prefetches and the total
1517 number of instructions. Since we are using integer arithmetic, we
1518 compute the reciprocal of this ratio.
1519 TODO: Account for loop unrolling, which may reduce the costs of
1520 shorter stride prefetches. Note that not accounting for loop
1521 unrolling over-estimates the cost and hence gives more conservative
1522 results. */
1523 if (est_niter < 0)
1524 {
1525 insn_to_prefetch_ratio = ninsns / prefetch_count;
1526 return insn_to_prefetch_ratio >= MIN_INSN_TO_PREFETCH_RATIO;
1527 }
1528
1529 if (est_niter <= (HOST_WIDE_INT) ahead)
1530 {
1531 if (dump_file && (dump_flags & TDF_DETAILS))
1532 fprintf (dump_file,
1533 "Not prefetching -- loop estimated to roll only %d times\n",
1534 (int) est_niter);
1535 return false;
1536 }
1537 return true;
1538 }
1539
1540
1541 /* Issue prefetch instructions for array references in LOOP. Returns
1542 true if the LOOP was unrolled. */
1543
1544 static bool
1545 loop_prefetch_arrays (struct loop *loop)
1546 {
1547 struct mem_ref_group *refs;
1548 unsigned ahead, ninsns, time, unroll_factor;
1549 HOST_WIDE_INT est_niter;
1550 struct tree_niter_desc desc;
1551 bool unrolled = false, no_other_refs;
1552 unsigned prefetch_count;
1553 unsigned mem_ref_count;
1554
1555 if (optimize_loop_nest_for_size_p (loop))
1556 {
1557 if (dump_file && (dump_flags & TDF_DETAILS))
1558 fprintf (dump_file, " ignored (cold area)\n");
1559 return false;
1560 }
1561
1562 /* Step 1: gather the memory references. */
1563 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1564
1565 /* Step 2: estimate the reuse effects. */
1566 prune_by_reuse (refs);
1567
1568 prefetch_count = estimate_prefetch_count (refs);
1569 if (prefetch_count == 0)
1570 goto fail;
1571
1572 determine_loop_nest_reuse (loop, refs, no_other_refs);
1573
1574 /* Step 3: determine the ahead and unroll factor. */
1575
1576 /* FIXME: the time should be weighted by the probabilities of the blocks in
1577 the loop body. */
1578 time = tree_num_loop_insns (loop, &eni_time_weights);
1579 ahead = (PREFETCH_LATENCY + time - 1) / time;
1580 est_niter = estimated_loop_iterations_int (loop, false);
1581
1582 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1583 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1584 est_niter);
1585 if (dump_file && (dump_flags & TDF_DETAILS))
1586 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1587 HOST_WIDE_INT_PRINT_DEC "\n"
1588 "insn count %d, mem ref count %d, prefetch count %d\n",
1589 ahead, unroll_factor, est_niter,
1590 ninsns, mem_ref_count, prefetch_count);
1591
1592 if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns,
1593 prefetch_count, mem_ref_count))
1594 goto fail;
1595
1596 mark_nontemporal_stores (loop, refs);
1597
1598 /* Step 4: what to prefetch? */
1599 if (!schedule_prefetches (refs, unroll_factor, ahead))
1600 goto fail;
1601
1602 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1603 iterations so that we do not issue superfluous prefetches. */
1604 if (unroll_factor != 1)
1605 {
1606 tree_unroll_loop (loop, unroll_factor,
1607 single_dom_exit (loop), &desc);
1608 unrolled = true;
1609 }
1610
1611 /* Step 6: issue the prefetches. */
1612 issue_prefetches (refs, unroll_factor, ahead);
1613
1614 fail:
1615 release_mem_refs (refs);
1616 return unrolled;
1617 }
1618
1619 /* Issue prefetch instructions for array references in loops. */
1620
1621 unsigned int
1622 tree_ssa_prefetch_arrays (void)
1623 {
1624 loop_iterator li;
1625 struct loop *loop;
1626 bool unrolled = false;
1627 int todo_flags = 0;
1628
1629 if (!HAVE_prefetch
1630 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1631 -mtune=i486 gives us PREFETCH_BLOCK 0, since this is part
1632 of the processor costs and i486 does not have prefetch, but
1633 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1634 || PREFETCH_BLOCK == 0)
1635 return 0;
1636
1637 if (dump_file && (dump_flags & TDF_DETAILS))
1638 {
1639 fprintf (dump_file, "Prefetching parameters:\n");
1640 fprintf (dump_file, " simultaneous prefetches: %d\n",
1641 SIMULTANEOUS_PREFETCHES);
1642 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1643 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1644 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1645 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1646 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1647 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1648 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1649 MIN_INSN_TO_PREFETCH_RATIO);
1650 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1651 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1652 fprintf (dump_file, "\n");
1653 }
1654
1655 initialize_original_copy_tables ();
1656
1657 if (!built_in_decls[BUILT_IN_PREFETCH])
1658 {
1659 tree type = build_function_type (void_type_node,
1660 tree_cons (NULL_TREE,
1661 const_ptr_type_node,
1662 NULL_TREE));
1663 tree decl = add_builtin_function ("__builtin_prefetch", type,
1664 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1665 NULL, NULL_TREE);
1666 DECL_IS_NOVOPS (decl) = true;
1667 built_in_decls[BUILT_IN_PREFETCH] = decl;
1668 }
1669
1670 /* We assume that the cache line size is a power of two, so verify this
1671 here. */
1672 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1673
1674 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1675 {
1676 if (dump_file && (dump_flags & TDF_DETAILS))
1677 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1678
1679 unrolled |= loop_prefetch_arrays (loop);
1680
1681 if (dump_file && (dump_flags & TDF_DETAILS))
1682 fprintf (dump_file, "\n\n");
1683 }
1684
1685 if (unrolled)
1686 {
1687 scev_reset ();
1688 todo_flags |= TODO_cleanup_cfg;
1689 }
1690
1691 free_original_copy_tables ();
1692 return todo_flags;
1693 }