1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "expr.h"
36 #include "tree-pass.h"
37 #include "ggc.h"
38 #include "insn-config.h"
39 #include "recog.h"
40 #include "hashtab.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "toplev.h"
44 #include "params.h"
45 #include "langhooks.h"
46 #include "tree-inline.h"
47 #include "tree-data-ref.h"
48 #include "optabs.h"
49
50 /* This pass inserts prefetch instructions to optimize cache usage during
51 accesses to arrays in loops. It processes loops sequentially and:
52
53 1) Gathers all memory references in the single loop.
54 2) For each of the references it decides when it is profitable to prefetch
55 it. To do this, we evaluate the reuse among the accesses and determine
56 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
57 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
58 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
59 iterations of the loop that are zero modulo PREFETCH_MOD). For example
60 (assuming cache line size is 64 bytes, char has size 1 byte and there
61 is no hardware sequential prefetch):
62
63 char *a;
64 for (i = 0; i < max; i++)
65 {
66 a[255] = ...; (0)
67 a[i] = ...; (1)
68 a[i + 64] = ...; (2)
69 a[16*i] = ...; (3)
70 a[187*i] = ...; (4)
71 a[187*i + 50] = ...; (5)
72 }
73
74 (0) obviously has PREFETCH_BEFORE 1
75 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
76 location 64 iterations before it, and PREFETCH_MOD 64 (since
77 it hits the same cache line otherwise).
78 (2) has PREFETCH_MOD 64
79 (3) has PREFETCH_MOD 4
80 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
81 the cache line accessed by (4) is the same with probability only
82 7/32.
83 (5) has PREFETCH_MOD 1 as well.
84
85 Additionally, we use data dependence analysis to determine for each
86 reference the distance till the first reuse; this information is used
87 to determine the temporality of the issued prefetch instruction.
88
89 3) We determine how much ahead we need to prefetch. The number of
90 iterations needed is time to fetch / time spent in one iteration of
91 the loop. The problem is that we do not know either of these values,
92 so we just make a heuristic guess based on a magic (possibly)
93 target-specific constant and size of the loop.
94
95 4) Determine which of the references we prefetch. We take into account
96 that there is a maximum number of simultaneous prefetches (provided
97 by machine description). We issue as many prefetches as possible
98 while still within this bound (starting with those with lowest
99 prefetch_mod, since they are responsible for most of the cache
100 misses).
101
102 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
103 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
104 prefetching nonaccessed memory.
105 TODO -- actually implement peeling.
106
107 6) We actually emit the prefetch instructions. ??? Perhaps emit the
108 prefetch instructions with guards in cases where 5) was not sufficient
109 to satisfy the constraints?
110
111 The function is_loop_prefetching_profitable() implements a cost model
112 to determine if prefetching is profitable for a given loop. The cost
113 model has two heuristics:
114 1. A heuristic that determines whether the given loop has enough CPU
115 ops that can be overlapped with cache-missing memory ops.
116 If not, the loop won't benefit from prefetching. This is implemented
117 by requiring the ratio between the instruction count and the mem ref
118 count to be above a certain minimum.
119 2. A heuristic that disables prefetching in a loop with an unknown trip
120 count if the prefetching cost is above a certain limit. The relative
121 prefetching cost is estimated by taking the ratio between the
122 prefetch count and the total instruction count (this models the I-cache
123 cost).
124 The limits used in these heuristics are defined as parameters with
125 reasonable default values. Machine-specific default values will be
126 added later.
127
128 Some other TODO:
129 -- write and use more general reuse analysis (that could be also used
130 in other cache aimed loop optimizations)
131 -- make it behave sanely together with the prefetches given by the user
132 (now we just ignore them; at the very least we should avoid
133 optimizing loops in which the user put his own prefetches)
134 -- we assume cache line size alignment of arrays; this could be
135 improved. */
136
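/* As a purely illustrative example of steps 2) and 3) above (all numbers
   here are hypothetical, not taken from any real target): assume a 64-byte
   prefetch block, a prefetch latency of 200 time units and the loop

     for (i = 0; i < n; i++)
       sum += a[i];

   where a is an array of 4-byte integers.  The reference a[i] then gets
   PREFETCH_MOD = 64 / 4 = 16, so only every 16th iteration needs a
   prefetch, and if one iteration of the loop costs about 10 time units,
   the ahead distance chosen in step 3) would be roughly
   ceil (200 / 10) = 20 iterations.  */
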
137 /* Magic constants follow. These should be replaced by machine specific
138 numbers. */
139
140 /* True if write can be prefetched by a read prefetch. */
141
142 #ifndef WRITE_CAN_USE_READ_PREFETCH
143 #define WRITE_CAN_USE_READ_PREFETCH 1
144 #endif
145
146 /* True if read can be prefetched by a write prefetch. */
147
148 #ifndef READ_CAN_USE_WRITE_PREFETCH
149 #define READ_CAN_USE_WRITE_PREFETCH 0
150 #endif
151
152 /* The size of the block loaded by a single prefetch. Usually, this is
153 the same as cache line size (at the moment, we only consider one level
154 of cache hierarchy). */
155
156 #ifndef PREFETCH_BLOCK
157 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
158 #endif
159
160 /* Do we have forward hardware sequential prefetching? */
161
162 #ifndef HAVE_FORWARD_PREFETCH
163 #define HAVE_FORWARD_PREFETCH 0
164 #endif
165
166 /* Do we have backward hardware sequential prefetching? */
167
168 #ifndef HAVE_BACKWARD_PREFETCH
169 #define HAVE_BACKWARD_PREFETCH 0
170 #endif
171
172 /* In some cases we are only able to determine that there is a certain
173 probability that the two accesses hit the same cache line. In this
174 case, we issue the prefetches for both of them if this probability
175 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
176
177 #ifndef ACCEPTABLE_MISS_RATE
178 #define ACCEPTABLE_MISS_RATE 50
179 #endif
180
181 #ifndef HAVE_prefetch
182 #define HAVE_prefetch 0
183 #endif
184
185 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
186 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
187
188 /* We consider a memory access nontemporal if it is not reused sooner than
189 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
190 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
191 so that we use nontemporal prefetches e.g. if a single memory location
192 is accessed several times in a single iteration of the loop. */
193 #define NONTEMPORAL_FRACTION 16
194
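/* For instance, with a hypothetical 32 kB L1 cache and 512 kB L2 cache,
   a reference would be considered nontemporal if its value is not reused
   before 512 kB of other data has been accessed, while reuses closer than
   32768 / 16 = 2048 bytes would be ignored.  */
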
195 /* In case we have to emit a memory fence instruction after the loop that
196 uses nontemporal stores, this defines the builtin to use. */
197
198 #ifndef FENCE_FOLLOWING_MOVNT
199 #define FENCE_FOLLOWING_MOVNT NULL_TREE
200 #endif
201
202 /* It is not profitable to prefetch when the trip count is not at
203 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
204 For example, in a loop with a prefetch ahead distance of 10,
205 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
206 profitable to prefetch when the trip count is greater or equal to
207 40. In that case, 30 out of the 40 iterations will benefit from
208 prefetching. */
209
210 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
211 #define TRIP_COUNT_TO_AHEAD_RATIO 4
212 #endif
213
214 /* The group of references between which reuse may occur. */
215
216 struct mem_ref_group
217 {
218 tree base; /* Base of the reference. */
219 tree step; /* Step of the reference. */
220 struct mem_ref *refs; /* References in the group. */
221 struct mem_ref_group *next; /* Next group of references. */
222 };
223
224 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
225
226 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
227
228 /* Do not generate a prefetch if the unroll factor is significantly less
229 than what is required by the prefetch. This is to avoid redundant
230 prefetches. For example, if prefetch_mod is 16 and unroll_factor is
231 1, this means prefetching requires unrolling the loop 16 times, but
232 the loop is not going to be unrolled. In this case (ratio = 16),
233 prefetching is not likely to be beneficial. */
234
235 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
236 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 8
237 #endif
238
239 /* The memory reference. */
240
241 struct mem_ref
242 {
243 gimple stmt; /* Statement in which the reference appears. */
244 tree mem; /* The reference. */
245 HOST_WIDE_INT delta; /* Constant offset of the reference. */
246 struct mem_ref_group *group; /* The group of references it belongs to. */
247 unsigned HOST_WIDE_INT prefetch_mod;
248 /* Prefetch only each PREFETCH_MOD-th
249 iteration. */
250 unsigned HOST_WIDE_INT prefetch_before;
251 /* Prefetch only first PREFETCH_BEFORE
252 iterations. */
253 unsigned reuse_distance; /* The amount of data accessed before the first
254 reuse of this value. */
255 struct mem_ref *next; /* The next reference in the group. */
256 unsigned write_p : 1; /* Is it a write? */
257 unsigned independent_p : 1; /* True if the reference is independent of
258 all other references inside the loop. */
259 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
260 unsigned storent_p : 1; /* True if we changed the store to a
261 nontemporal one. */
262 };
263
264 /* Dumps information about reference REF to FILE. */
265
266 static void
267 dump_mem_ref (FILE *file, struct mem_ref *ref)
268 {
269 fprintf (file, "Reference %p:\n", (void *) ref);
270
271 fprintf (file, " group %p (base ", (void *) ref->group);
272 print_generic_expr (file, ref->group->base, TDF_SLIM);
273 fprintf (file, ", step ");
274 if (cst_and_fits_in_hwi (ref->group->step))
275 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
276 else
277 print_generic_expr (file, ref->group->step, TDF_TREE);
278 fprintf (file, ")\n");
279
280 fprintf (file, " delta ");
281 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
282 fprintf (file, "\n");
283
284 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
285
286 fprintf (file, "\n");
287 }
288
289 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
290 exist. */
291
292 static struct mem_ref_group *
293 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
294 {
295 struct mem_ref_group *group;
296
297 for (; *groups; groups = &(*groups)->next)
298 {
299 if (operand_equal_p ((*groups)->step, step, 0)
300 && operand_equal_p ((*groups)->base, base, 0))
301 return *groups;
302
303 /* If step is an integer constant, keep the list of groups sorted
304 by decreasing step. */
305 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
306 && int_cst_value ((*groups)->step) < int_cst_value (step))
307 break;
308 }
309
310 group = XNEW (struct mem_ref_group);
311 group->base = base;
312 group->step = step;
313 group->refs = NULL;
314 group->next = *groups;
315 *groups = group;
316
317 return group;
318 }
319
320 /* Records a memory reference MEM in GROUP with offset DELTA and write status
321 WRITE_P. The reference occurs in statement STMT. */
322
323 static void
324 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
325 HOST_WIDE_INT delta, bool write_p)
326 {
327 struct mem_ref **aref;
328
329 /* Do not record the same address twice. */
330 for (aref = &group->refs; *aref; aref = &(*aref)->next)
331 {
332 /* It may not be possible for a write reference to reuse a read
333 prefetch, or vice versa. */
334 if (!WRITE_CAN_USE_READ_PREFETCH
335 && write_p
336 && !(*aref)->write_p)
337 continue;
338 if (!READ_CAN_USE_WRITE_PREFETCH
339 && !write_p
340 && (*aref)->write_p)
341 continue;
342
343 if ((*aref)->delta == delta)
344 return;
345 }
346
347 (*aref) = XNEW (struct mem_ref);
348 (*aref)->stmt = stmt;
349 (*aref)->mem = mem;
350 (*aref)->delta = delta;
351 (*aref)->write_p = write_p;
352 (*aref)->prefetch_before = PREFETCH_ALL;
353 (*aref)->prefetch_mod = 1;
354 (*aref)->reuse_distance = 0;
355 (*aref)->issue_prefetch_p = false;
356 (*aref)->group = group;
357 (*aref)->next = NULL;
358 (*aref)->independent_p = false;
359 (*aref)->storent_p = false;
360
361 if (dump_file && (dump_flags & TDF_DETAILS))
362 dump_mem_ref (dump_file, *aref);
363 }
364
365 /* Release memory references in GROUPS. */
366
367 static void
368 release_mem_refs (struct mem_ref_group *groups)
369 {
370 struct mem_ref_group *next_g;
371 struct mem_ref *ref, *next_r;
372
373 for (; groups; groups = next_g)
374 {
375 next_g = groups->next;
376 for (ref = groups->refs; ref; ref = next_r)
377 {
378 next_r = ref->next;
379 free (ref);
380 }
381 free (groups);
382 }
383 }
384
385 /* A structure used to pass arguments to idx_analyze_ref. */
386
387 struct ar_data
388 {
389 struct loop *loop; /* Loop of the reference. */
390 gimple stmt; /* Statement of the reference. */
391 tree *step; /* Step of the memory reference. */
392 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
393 };
394
395 /* Analyzes a single INDEX of a memory reference to obtain information
396 described at analyze_ref. Callback for for_each_index. */
397
398 static bool
399 idx_analyze_ref (tree base, tree *index, void *data)
400 {
401 struct ar_data *ar_data = (struct ar_data *) data;
402 tree ibase, step, stepsize;
403 HOST_WIDE_INT idelta = 0, imult = 1;
404 affine_iv iv;
405
406 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
407 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
408 return false;
409
410 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
411 *index, &iv, true))
412 return false;
413 ibase = iv.base;
414 step = iv.step;
415
416 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
417 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
418 {
419 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
420 ibase = TREE_OPERAND (ibase, 0);
421 }
422 if (cst_and_fits_in_hwi (ibase))
423 {
424 idelta += int_cst_value (ibase);
425 ibase = build_int_cst (TREE_TYPE (ibase), 0);
426 }
427
428 if (*ar_data->step == NULL_TREE)
429 *ar_data->step = step;
430 else
431 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
432 fold_convert (sizetype, *ar_data->step),
433 fold_convert (sizetype, step));
434 if (TREE_CODE (base) == ARRAY_REF)
435 {
436 stepsize = array_ref_element_size (base);
437 if (!cst_and_fits_in_hwi (stepsize))
438 return false;
439 imult = int_cst_value (stepsize);
440
441 *ar_data->step = fold_build2 (MULT_EXPR, sizetype,
442 fold_convert (sizetype, *ar_data->step),
443 fold_convert (sizetype, stepsize));
444 idelta *= imult;
445 }
446
447 *ar_data->delta += idelta;
448 *index = ibase;
449
450 return true;
451 }
452
453 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA
454 is an integer constant, STEP is loop invariant and iter is the number of iterations of LOOP. The
455 reference occurs in statement STMT. Strips nonaddressable component
456 references from REF_P. */
457
458 static bool
459 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
460 tree *step, HOST_WIDE_INT *delta,
461 gimple stmt)
462 {
463 struct ar_data ar_data;
464 tree off;
465 HOST_WIDE_INT bit_offset;
466 tree ref = *ref_p;
467
468 *step = NULL_TREE;
469 *delta = 0;
470
471 /* First strip off the component references. Ignore bitfields. */
472 if (TREE_CODE (ref) == COMPONENT_REF
473 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
474 ref = TREE_OPERAND (ref, 0);
475
476 *ref_p = ref;
477
478 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
479 {
480 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
481 bit_offset = TREE_INT_CST_LOW (off);
482 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
483
484 *delta += bit_offset / BITS_PER_UNIT;
485 }
486
487 *base = unshare_expr (ref);
488 ar_data.loop = loop;
489 ar_data.stmt = stmt;
490 ar_data.step = step;
491 ar_data.delta = delta;
492 return for_each_index (base, idx_analyze_ref, &ar_data);
493 }
494
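/* As a rough illustration (hypothetical code, assuming the loop counter i
   starts at 0 and increases by 1 each iteration): for a reference a[i + 3]
   into an array of 4-byte integers, analyze_ref produces approximately
   BASE = a[0], STEP = 4 (the element size times the step of i) and
   DELTA = 12, so the address is &a[0] + 4 * iter + 12.  */
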
495 /* Record a memory reference REF to the list REFS. The reference occurs in
496 LOOP in statement STMT and it is a write if WRITE_P. Returns true if the
497 reference was recorded, false otherwise. */
498
499 static bool
500 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
501 tree ref, bool write_p, gimple stmt)
502 {
503 tree base, step;
504 HOST_WIDE_INT delta;
505 struct mem_ref_group *agrp;
506
507 if (get_base_address (ref) == NULL)
508 return false;
509
510 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
511 return false;
512 /* If the step could not be determined, analyze_ref leaves it as NULL_TREE and we can stop here. */
513 if (step == NULL_TREE)
514 return false;
515
516 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA is an
517 integer constant and STEP is loop invariant. */
518 agrp = find_or_create_group (refs, base, step);
519 record_ref (agrp, stmt, ref, delta, write_p);
520
521 return true;
522 }
523
524 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
525 true if there are no other memory references inside the loop. */
526
527 static struct mem_ref_group *
528 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
529 {
530 basic_block *body = get_loop_body_in_dom_order (loop);
531 basic_block bb;
532 unsigned i;
533 gimple_stmt_iterator bsi;
534 gimple stmt;
535 tree lhs, rhs;
536 struct mem_ref_group *refs = NULL;
537
538 *no_other_refs = true;
539 *ref_count = 0;
540
541 /* Scan the loop body in order, so that the former references precede the
542 later ones. */
543 for (i = 0; i < loop->num_nodes; i++)
544 {
545 bb = body[i];
546 if (bb->loop_father != loop)
547 continue;
548
549 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
550 {
551 stmt = gsi_stmt (bsi);
552
553 if (gimple_code (stmt) != GIMPLE_ASSIGN)
554 {
555 if (gimple_vuse (stmt)
556 || (is_gimple_call (stmt)
557 && !(gimple_call_flags (stmt) & ECF_CONST)))
558 *no_other_refs = false;
559 continue;
560 }
561
562 lhs = gimple_assign_lhs (stmt);
563 rhs = gimple_assign_rhs1 (stmt);
564
565 if (REFERENCE_CLASS_P (rhs))
566 {
567 *no_other_refs &= gather_memory_references_ref (loop, &refs,
568 rhs, false, stmt);
569 *ref_count += 1;
570 }
571 if (REFERENCE_CLASS_P (lhs))
572 {
573 *no_other_refs &= gather_memory_references_ref (loop, &refs,
574 lhs, true, stmt);
575 *ref_count += 1;
576 }
577 }
578 }
579 free (body);
580
581 return refs;
582 }
583
584 /* Prune the prefetch candidate REF using the self-reuse. */
585
586 static void
587 prune_ref_by_self_reuse (struct mem_ref *ref)
588 {
589 HOST_WIDE_INT step;
590 bool backward;
591
592 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
593 if (!cst_and_fits_in_hwi (ref->group->step))
594 return;
595
596 step = int_cst_value (ref->group->step);
597
598 backward = step < 0;
599
600 if (step == 0)
601 {
602 /* Prefetch references to an invariant address just once. */
603 ref->prefetch_before = 1;
604 return;
605 }
606
607 if (backward)
608 step = -step;
609
610 if (step > PREFETCH_BLOCK)
611 return;
612
613 if ((backward && HAVE_BACKWARD_PREFETCH)
614 || (!backward && HAVE_FORWARD_PREFETCH))
615 {
616 ref->prefetch_before = 1;
617 return;
618 }
619
620 ref->prefetch_mod = PREFETCH_BLOCK / step;
621 }
622
623 /* Divides X by BY, rounding down. */
624
625 static HOST_WIDE_INT
626 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
627 {
628 gcc_assert (by > 0);
629
630 if (x >= 0)
631 return x / by;
632 else
633 return (x + by - 1) / by;
634 }
635
636 /* Given a CACHE_LINE_SIZE and two inductive memory references
637 with a common STEP greater than CACHE_LINE_SIZE and an address
638 difference DELTA, compute the probability that they will fall
639 in different cache lines. DISTINCT_ITERS is the number of
640 distinct iterations after which the pattern repeats itself.
641 ALIGN_UNIT is the unit of alignment in bytes. */
642
643 static int
644 compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
645 HOST_WIDE_INT step, HOST_WIDE_INT delta,
646 unsigned HOST_WIDE_INT distinct_iters,
647 int align_unit)
648 {
649 unsigned align, iter;
650 int total_positions, miss_positions, miss_rate;
651 int address1, address2, cache_line1, cache_line2;
652
653 total_positions = 0;
654 miss_positions = 0;
655
656 /* Iterate through all possible alignments of the first
657 memory reference within its cache line. */
658 for (align = 0; align < cache_line_size; align += align_unit)
659
660 /* Iterate through all distinct iterations. */
661 for (iter = 0; iter < distinct_iters; iter++)
662 {
663 address1 = align + step * iter;
664 address2 = address1 + delta;
665 cache_line1 = address1 / cache_line_size;
666 cache_line2 = address2 / cache_line_size;
667 total_positions += 1;
668 if (cache_line1 != cache_line2)
669 miss_positions += 1;
670 }
671 miss_rate = 1000 * miss_positions / total_positions;
672 return miss_rate;
673 }
674
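/* A small worked example with made-up values: cache_line_size = 64,
   step = 96, delta = 32, distinct_iters = 2 and align_unit = 32. The
   loops above then examine four (align, iter) pairs:

     align 0,  iter 0:  addresses 0 and 32    -> same cache line
     align 0,  iter 1:  addresses 96 and 128  -> different cache lines
     align 32, iter 0:  addresses 32 and 64   -> different cache lines
     align 32, iter 1:  addresses 128 and 160 -> same cache line

   so miss_rate = 1000 * 2 / 4 = 500 per thousand.  */
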
675 /* Prune the prefetch candidate REF using the reuse with BY.
676 If BY_IS_BEFORE is true, BY is before REF in the loop. */
677
678 static void
679 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
680 bool by_is_before)
681 {
682 HOST_WIDE_INT step;
683 bool backward;
684 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
685 HOST_WIDE_INT delta = delta_b - delta_r;
686 HOST_WIDE_INT hit_from;
687 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
688 int miss_rate;
689 HOST_WIDE_INT reduced_step;
690 unsigned HOST_WIDE_INT reduced_prefetch_block;
691 tree ref_type;
692 int align_unit;
693
694 /* If the step is non-constant, we cannot calculate prefetch_before. */
695 if (!cst_and_fits_in_hwi (ref->group->step))
696 return;
698
699 step = int_cst_value (ref->group->step);
700
701 backward = step < 0;
702
703
704 if (delta == 0)
705 {
706 /* If the references have the same address, only prefetch the
707 former. */
708 if (by_is_before)
709 ref->prefetch_before = 0;
710
711 return;
712 }
713
714 if (!step)
715 {
716 /* If the reference addresses are invariant and fall into the
717 same cache line, prefetch just the first one. */
718 if (!by_is_before)
719 return;
720
721 if (ddown (ref->delta, PREFETCH_BLOCK)
722 != ddown (by->delta, PREFETCH_BLOCK))
723 return;
724
725 ref->prefetch_before = 0;
726 return;
727 }
728
729 /* Only prune the reference that is behind in the array. */
730 if (backward)
731 {
732 if (delta > 0)
733 return;
734
735 /* Transform the data so that we may assume that the accesses
736 are forward. */
737 delta = - delta;
738 step = -step;
739 delta_r = PREFETCH_BLOCK - 1 - delta_r;
740 delta_b = PREFETCH_BLOCK - 1 - delta_b;
741 }
742 else
743 {
744 if (delta < 0)
745 return;
746 }
747
748 /* Check whether the two references are likely to hit the same cache
749 line, and how far apart the iterations in which this occurs are from
750 each other. */
751
752 if (step <= PREFETCH_BLOCK)
753 {
754 /* The accesses are sure to meet. Let us check when. */
755 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
756 prefetch_before = (hit_from - delta_r + step - 1) / step;
757
758 /* Do not reduce prefetch_before if we meet beyond cache size. */
759 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
760 prefetch_before = PREFETCH_ALL;
761 if (prefetch_before < ref->prefetch_before)
762 ref->prefetch_before = prefetch_before;
763
764 return;
765 }
766
767 /* A more complicated case with step > prefetch_block. First reduce
768 the ratio between the step and the cache line size to its simplest
769 terms. The resulting denominator will then represent the number of
770 distinct iterations after which each address will go back to its
771 initial location within the cache line. This computation assumes
772 that PREFETCH_BLOCK is a power of two. */
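/* For example (with made-up numbers): if PREFETCH_BLOCK is 64 and the
   step is 96, the ratio 96/64 reduces to 3/2, so the offsets of the
   accesses within their cache lines repeat with a period of 2 iterations
   (offset, offset + 32, offset, ...).  */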
773 prefetch_block = PREFETCH_BLOCK;
774 reduced_prefetch_block = prefetch_block;
775 reduced_step = step;
776 while ((reduced_step & 1) == 0
777 && reduced_prefetch_block > 1)
778 {
779 reduced_step >>= 1;
780 reduced_prefetch_block >>= 1;
781 }
782
783 prefetch_before = delta / step;
784 delta %= step;
785 ref_type = TREE_TYPE (ref->mem);
786 align_unit = TYPE_ALIGN (ref_type) / 8;
787 miss_rate = compute_miss_rate(prefetch_block, step, delta,
788 reduced_prefetch_block, align_unit);
789 if (miss_rate <= ACCEPTABLE_MISS_RATE)
790 {
791 /* Do not reduce prefetch_before if we meet beyond cache size. */
792 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
793 prefetch_before = PREFETCH_ALL;
794 if (prefetch_before < ref->prefetch_before)
795 ref->prefetch_before = prefetch_before;
796
797 return;
798 }
799
800 /* Try also the following iteration. */
801 prefetch_before++;
802 delta = step - delta;
803 miss_rate = compute_miss_rate(prefetch_block, step, delta,
804 reduced_prefetch_block, align_unit);
805 if (miss_rate <= ACCEPTABLE_MISS_RATE)
806 {
807 if (prefetch_before < ref->prefetch_before)
808 ref->prefetch_before = prefetch_before;
809
810 return;
811 }
812
813 /* The reference REF probably does not reuse BY. */
814 return;
815 }
816
817 /* Prune the prefetch candidate REF using the reuses with other references
818 in REFS. */
819
820 static void
821 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
822 {
823 struct mem_ref *prune_by;
824 bool before = true;
825
826 prune_ref_by_self_reuse (ref);
827
828 for (prune_by = refs; prune_by; prune_by = prune_by->next)
829 {
830 if (prune_by == ref)
831 {
832 before = false;
833 continue;
834 }
835
836 if (!WRITE_CAN_USE_READ_PREFETCH
837 && ref->write_p
838 && !prune_by->write_p)
839 continue;
840 if (!READ_CAN_USE_WRITE_PREFETCH
841 && !ref->write_p
842 && prune_by->write_p)
843 continue;
844
845 prune_ref_by_group_reuse (ref, prune_by, before);
846 }
847 }
848
849 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
850
851 static void
852 prune_group_by_reuse (struct mem_ref_group *group)
853 {
854 struct mem_ref *ref_pruned;
855
856 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
857 {
858 prune_ref_by_reuse (ref_pruned, group->refs);
859
860 if (dump_file && (dump_flags & TDF_DETAILS))
861 {
862 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
863
864 if (ref_pruned->prefetch_before == PREFETCH_ALL
865 && ref_pruned->prefetch_mod == 1)
866 fprintf (dump_file, " no restrictions");
867 else if (ref_pruned->prefetch_before == 0)
868 fprintf (dump_file, " do not prefetch");
869 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
870 fprintf (dump_file, " prefetch once");
871 else
872 {
873 if (ref_pruned->prefetch_before != PREFETCH_ALL)
874 {
875 fprintf (dump_file, " prefetch before ");
876 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
877 ref_pruned->prefetch_before);
878 }
879 if (ref_pruned->prefetch_mod != 1)
880 {
881 fprintf (dump_file, " prefetch mod ");
882 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
883 ref_pruned->prefetch_mod);
884 }
885 }
886 fprintf (dump_file, "\n");
887 }
888 }
889 }
890
891 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
892
893 static void
894 prune_by_reuse (struct mem_ref_group *groups)
895 {
896 for (; groups; groups = groups->next)
897 prune_group_by_reuse (groups);
898 }
899
900 /* Returns true if we should issue prefetch for REF. */
901
902 static bool
903 should_issue_prefetch_p (struct mem_ref *ref)
904 {
905 /* For now, do not issue prefetches that are useful only in the first
906 few iterations of the loop. */
907 if (ref->prefetch_before != PREFETCH_ALL)
908 {
909 if (dump_file && (dump_flags & TDF_DETAILS))
910 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
911 (void *) ref);
912 return false;
913 }
914
915 /* Do not prefetch nontemporal stores. */
916 if (ref->storent_p)
917 {
918 if (dump_file && (dump_flags & TDF_DETAILS))
919 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
920 return false;
921 }
922
923 return true;
924 }
925
926 /* Decide which of the prefetch candidates in GROUPS to prefetch.
927 AHEAD is the number of iterations to prefetch ahead (which corresponds
928 to the number of simultaneous instances of one prefetch running at a
929 time). UNROLL_FACTOR is the factor by which the loop is going to be
930 unrolled. Returns true if there is anything to prefetch. */
931
932 static bool
933 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
934 unsigned ahead)
935 {
936 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
937 unsigned slots_per_prefetch;
938 struct mem_ref *ref;
939 bool any = false;
940
941 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
942 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
943
944 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
945 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
946 it will need a prefetch slot. */
947 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
948 if (dump_file && (dump_flags & TDF_DETAILS))
949 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
950 slots_per_prefetch);
951
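/* For instance (illustrative values only): with ahead = 8 and
   unroll_factor = 4, each prefetch instruction takes
   (8 + 4 / 2) / 4 = 2 slots; a reference with prefetch_mod = 2 then
   needs ceil (4 / 2) = 2 prefetch instructions per unrolled iteration,
   i.e. 4 of the available prefetch slots.  */
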
952 /* For now we just take memory references one by one and issue
953 prefetches for as many as possible. The groups are sorted
954 starting with the largest step, since the references with
955 large step are more likely to cause many cache misses. */
956
957 for (; groups; groups = groups->next)
958 for (ref = groups->refs; ref; ref = ref->next)
959 {
960 if (!should_issue_prefetch_p (ref))
961 continue;
962
963 /* The loop is far from being sufficiently unrolled for this
964 prefetch. Do not generate the prefetch to avoid many redundant
965 prefetches. */
966 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
967 continue;
968
969 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
970 and we unroll the loop UNROLL_FACTOR times, we need to insert
971 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
972 iteration. */
973 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
974 / ref->prefetch_mod);
975 prefetch_slots = n_prefetches * slots_per_prefetch;
976
977 /* If more than half of the prefetches would be lost anyway, do not
978 issue the prefetch. */
979 if (2 * remaining_prefetch_slots < prefetch_slots)
980 continue;
981
982 ref->issue_prefetch_p = true;
983
984 if (remaining_prefetch_slots <= prefetch_slots)
985 return true;
986 remaining_prefetch_slots -= prefetch_slots;
987 any = true;
988 }
989
990 return any;
991 }
992
993 /* Estimate the number of prefetches in the given GROUPS. */
994
995 static int
996 estimate_prefetch_count (struct mem_ref_group *groups)
997 {
998 struct mem_ref *ref;
999 int prefetch_count = 0;
1000
1001 for (; groups; groups = groups->next)
1002 for (ref = groups->refs; ref; ref = ref->next)
1003 if (should_issue_prefetch_p (ref))
1004 prefetch_count++;
1005
1006 return prefetch_count;
1007 }
1008
1009 /* Issue prefetches for the reference REF into loop as decided before.
1010 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1011 is the factor by which LOOP was unrolled. */
1012
1013 static void
1014 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1015 {
1016 HOST_WIDE_INT delta;
1017 tree addr, addr_base, write_p, local, forward;
1018 gimple prefetch;
1019 gimple_stmt_iterator bsi;
1020 unsigned n_prefetches, ap;
1021 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1022
1023 if (dump_file && (dump_flags & TDF_DETAILS))
1024 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1025 nontemporal ? " nontemporal" : "",
1026 (void *) ref);
1027
1028 bsi = gsi_for_stmt (ref->stmt);
1029
1030 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1031 / ref->prefetch_mod);
1032 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1033 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1034 true, NULL, true, GSI_SAME_STMT);
1035 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1036 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
1037
1038 for (ap = 0; ap < n_prefetches; ap++)
1039 {
1040 if (cst_and_fits_in_hwi (ref->group->step))
1041 {
1042 /* Determine the address to prefetch. */
1043 delta = (ahead + ap * ref->prefetch_mod) *
1044 int_cst_value (ref->group->step);
1045 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1046 addr_base, size_int (delta));
1047 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1048 true, GSI_SAME_STMT);
1049 }
1050 else
1051 {
1052 /* The step size is non-constant but loop-invariant. We use the
1053 heuristic of simply prefetching AHEAD iterations ahead. */
1054 forward = fold_build2 (MULT_EXPR, sizetype,
1055 fold_convert (sizetype, ref->group->step),
1056 fold_convert (sizetype, size_int (ahead)));
1057 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1058 forward);
1059 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1060 NULL, true, GSI_SAME_STMT);
1061 }
1062 /* Create the prefetch instruction. */
1063 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1064 3, addr, write_p, local);
1065 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1066 }
1067 }
1068
1069 /* Issue prefetches for the references in GROUPS into loop as decided before.
1070 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1071 factor by which LOOP was unrolled. */
1072
1073 static void
1074 issue_prefetches (struct mem_ref_group *groups,
1075 unsigned unroll_factor, unsigned ahead)
1076 {
1077 struct mem_ref *ref;
1078
1079 for (; groups; groups = groups->next)
1080 for (ref = groups->refs; ref; ref = ref->next)
1081 if (ref->issue_prefetch_p)
1082 issue_prefetch_ref (ref, unroll_factor, ahead);
1083 }
1084
1085 /* Returns true if REF is a memory write for which a nontemporal store insn
1086 can be used. */
1087
1088 static bool
1089 nontemporal_store_p (struct mem_ref *ref)
1090 {
1091 enum machine_mode mode;
1092 enum insn_code code;
1093
1094 /* REF must be a write that is not reused. We require it to be independent
1095 of all other memory references in the loop, as the nontemporal stores may
1096 be reordered with respect to other memory references. */
1097 if (!ref->write_p
1098 || !ref->independent_p
1099 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1100 return false;
1101
1102 /* Check that we have the storent instruction for the mode. */
1103 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1104 if (mode == BLKmode)
1105 return false;
1106
1107 code = optab_handler (storent_optab, mode)->insn_code;
1108 return code != CODE_FOR_nothing;
1109 }
1110
1111 /* If REF is a nontemporal store, we mark the corresponding modify statement
1112 and return true. Otherwise, we return false. */
1113
1114 static bool
1115 mark_nontemporal_store (struct mem_ref *ref)
1116 {
1117 if (!nontemporal_store_p (ref))
1118 return false;
1119
1120 if (dump_file && (dump_flags & TDF_DETAILS))
1121 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1122 (void *) ref);
1123
1124 gimple_assign_set_nontemporal_move (ref->stmt, true);
1125 ref->storent_p = true;
1126
1127 return true;
1128 }
1129
1130 /* Issue a memory fence instruction after LOOP. */
1131
1132 static void
1133 emit_mfence_after_loop (struct loop *loop)
1134 {
1135 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1136 edge exit;
1137 gimple call;
1138 gimple_stmt_iterator bsi;
1139 unsigned i;
1140
1141 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1142 {
1143 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1144
1145 if (!single_pred_p (exit->dest)
1146 /* If possible, we prefer not to insert the fence on other paths
1147 in cfg. */
1148 && !(exit->flags & EDGE_ABNORMAL))
1149 split_loop_exit_edge (exit);
1150 bsi = gsi_after_labels (exit->dest);
1151
1152 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1153 mark_virtual_ops_for_renaming (call);
1154 }
1155
1156 VEC_free (edge, heap, exits);
1157 update_ssa (TODO_update_ssa_only_virtuals);
1158 }
1159
1160 /* Returns true if we can use storent in loop, false otherwise. */
1161
1162 static bool
1163 may_use_storent_in_loop_p (struct loop *loop)
1164 {
1165 bool ret = true;
1166
1167 if (loop->inner != NULL)
1168 return false;
1169
1170 /* If we must issue a mfence insn after using storent, check that there
1171 is a suitable place for it at each of the loop exits. */
1172 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1173 {
1174 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1175 unsigned i;
1176 edge exit;
1177
1178 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1179 if ((exit->flags & EDGE_ABNORMAL)
1180 && exit->dest == EXIT_BLOCK_PTR)
1181 ret = false;
1182
1183 VEC_free (edge, heap, exits);
1184 }
1185
1186 return ret;
1187 }
1188
1189 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1190 references in the loop. */
1191
1192 static void
1193 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1194 {
1195 struct mem_ref *ref;
1196 bool any = false;
1197
1198 if (!may_use_storent_in_loop_p (loop))
1199 return;
1200
1201 for (; groups; groups = groups->next)
1202 for (ref = groups->refs; ref; ref = ref->next)
1203 any |= mark_nontemporal_store (ref);
1204
1205 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1206 emit_mfence_after_loop (loop);
1207 }
1208
1209 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1210 this is the case, fill in DESC with the description of the number of
1211 iterations. */
1212
1213 static bool
1214 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1215 unsigned factor)
1216 {
1217 if (!can_unroll_loop_p (loop, factor, desc))
1218 return false;
1219
1220 /* We only consider loops without control flow for unrolling. This is not
1221 a hard restriction -- tree_unroll_loop works with arbitrary loops
1222 as well; but the unrolling/prefetching is usually more profitable for
1223 loops consisting of a single basic block, and we want to limit the
1224 code growth. */
1225 if (loop->num_nodes > 2)
1226 return false;
1227
1228 return true;
1229 }
1230
1231 /* Determine the factor by which to unroll LOOP, from the information
1232 contained in the list of memory references REFS. The description of the
1233 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1234 insns of the LOOP. EST_NITER is the estimated number of iterations of
1235 the loop, or -1 if no estimate is available. */
1236
1237 static unsigned
1238 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1239 unsigned ninsns, struct tree_niter_desc *desc,
1240 HOST_WIDE_INT est_niter)
1241 {
1242 unsigned upper_bound;
1243 unsigned nfactor, factor, mod_constraint;
1244 struct mem_ref_group *agp;
1245 struct mem_ref *ref;
1246
1247 /* First check whether the loop is not too large to unroll. We ignore
1248 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1249 from unrolling them enough to make exactly one cache line covered by each
1250 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1251 us from unrolling the loops too many times in cases where we only expect
1252 gains from better scheduling and decreasing loop overhead, which is not
1253 the case here. */
1254 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1255
1256 /* If we unrolled the loop more times than it iterates, the unrolled version
1257 of the loop would never be entered. */
1258 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1259 upper_bound = est_niter;
1260
1261 if (upper_bound <= 1)
1262 return 1;
1263
1264 /* Choose the factor so that we may prefetch each cache line just once,
1265 but bound the unrolling by UPPER_BOUND. */
1266 factor = 1;
1267 for (agp = refs; agp; agp = agp->next)
1268 for (ref = agp->refs; ref; ref = ref->next)
1269 if (should_issue_prefetch_p (ref))
1270 {
1271 mod_constraint = ref->prefetch_mod;
1272 nfactor = least_common_multiple (mod_constraint, factor);
1273 if (nfactor <= upper_bound)
1274 factor = nfactor;
1275 }
1276
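/* As an example with made-up values: if the references that will be
   prefetched have prefetch_mod 4 and 8, the loop above picks
   factor = lcm (4, 8) = 8, provided 8 does not exceed UPPER_BOUND.  */
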
1277 if (!should_unroll_loop_p (loop, desc, factor))
1278 return 1;
1279
1280 return factor;
1281 }
1282
1283 /* Returns the total volume of the memory references REFS, taking into account
1284 reuses in the innermost loop and cache line size. TODO -- we should also
1285 take into account reuses across the iterations of the loops in the loop
1286 nest. */
1287
1288 static unsigned
1289 volume_of_references (struct mem_ref_group *refs)
1290 {
1291 unsigned volume = 0;
1292 struct mem_ref_group *gr;
1293 struct mem_ref *ref;
1294
1295 for (gr = refs; gr; gr = gr->next)
1296 for (ref = gr->refs; ref; ref = ref->next)
1297 {
1298 /* Almost always reuses another value? */
1299 if (ref->prefetch_before != PREFETCH_ALL)
1300 continue;
1301
1302 /* If several iterations access the same cache line, use the size of
1303 the line divided by this number. Otherwise, a cache line is
1304 accessed in each iteration. TODO -- in the latter case, we should
1305 take the size of the reference into account, rounding it up on cache
1306 line size multiple. */
1307 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1308 }
1309 return volume;
1310 }
1311
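/* For example (assuming a 64-byte L1 line): a reference prefetched every
   iteration contributes 64 bytes per iteration to the volume, while one
   with prefetch_mod = 16 contributes 64 / 16 = 4 bytes, for a total of
   68 bytes per iteration.  */
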
1312 /* Returns the volume of memory references accessed across VEC iterations of
1313 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1314 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1315
1316 static unsigned
1317 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1318 {
1319 unsigned i;
1320
1321 for (i = 0; i < n; i++)
1322 if (vec[i] != 0)
1323 break;
1324
1325 if (i == n)
1326 return 0;
1327
1328 gcc_assert (vec[i] > 0);
1329
1330 /* We ignore the parts of the distance vector in subloops, since usually
1331 the numbers of iterations are much smaller. */
1332 return loop_sizes[i] * vec[i];
1333 }
1334
1335 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1336 at the position corresponding to the loop of the step. N is the depth
1337 of the considered loop nest, and LOOP is its innermost loop. */
1338
1339 static void
1340 add_subscript_strides (tree access_fn, unsigned stride,
1341 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1342 {
1343 struct loop *aloop;
1344 tree step;
1345 HOST_WIDE_INT astep;
1346 unsigned min_depth = loop_depth (loop) - n;
1347
1348 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1349 {
1350 aloop = get_chrec_loop (access_fn);
1351 step = CHREC_RIGHT (access_fn);
1352 access_fn = CHREC_LEFT (access_fn);
1353
1354 if ((unsigned) loop_depth (aloop) <= min_depth)
1355 continue;
1356
1357 if (host_integerp (step, 0))
1358 astep = tree_low_cst (step, 0);
1359 else
1360 astep = L1_CACHE_LINE_SIZE;
1361
1362 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1363
1364 }
1365 }
1366
1367 /* Returns the volume of memory references accessed between two consecutive
1368 self-reuses of the reference DR. We consider the subscripts of DR in N
1369 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1370 loops. LOOP is the innermost loop of the current loop nest. */
1371
1372 static unsigned
1373 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1374 struct loop *loop)
1375 {
1376 tree stride, access_fn;
1377 HOST_WIDE_INT *strides, astride;
1378 VEC (tree, heap) *access_fns;
1379 tree ref = DR_REF (dr);
1380 unsigned i, ret = ~0u;
1381
1382 /* In the following example:
1383
1384 for (i = 0; i < N; i++)
1385 for (j = 0; j < N; j++)
1386 use (a[j][i]);
1387 the same cache line is accessed each N steps (except if the change from
1388 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1389 we cannot rely purely on the results of the data dependence analysis.
1390
1391 Instead, we compute the stride of the reference in each loop, and consider
1392 the innermost loop in which the stride is less than the cache size. */
1393
1394 strides = XCNEWVEC (HOST_WIDE_INT, n);
1395 access_fns = DR_ACCESS_FNS (dr);
1396
1397 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1398 {
1399 /* Keep track of the reference corresponding to the subscript, so that we
1400 know its stride. */
1401 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1402 ref = TREE_OPERAND (ref, 0);
1403
1404 if (TREE_CODE (ref) == ARRAY_REF)
1405 {
1406 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1407 if (host_integerp (stride, 1))
1408 astride = tree_low_cst (stride, 1);
1409 else
1410 astride = L1_CACHE_LINE_SIZE;
1411
1412 ref = TREE_OPERAND (ref, 0);
1413 }
1414 else
1415 astride = 1;
1416
1417 add_subscript_strides (access_fn, astride, strides, n, loop);
1418 }
1419
1420 for (i = n; i-- > 0; )
1421 {
1422 unsigned HOST_WIDE_INT s;
1423
1424 s = strides[i] < 0 ? -strides[i] : strides[i];
1425
1426 if (s < (unsigned) L1_CACHE_LINE_SIZE
1427 && (loop_sizes[i]
1428 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1429 {
1430 ret = loop_sizes[i];
1431 break;
1432 }
1433 }
1434
1435 free (strides);
1436 return ret;
1437 }
1438
1439 /* Determines the distance till the first reuse of each reference in REFS
1440 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1441 memory references in the loop. */
1442
1443 static void
1444 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1445 bool no_other_refs)
1446 {
1447 struct loop *nest, *aloop;
1448 VEC (data_reference_p, heap) *datarefs = NULL;
1449 VEC (ddr_p, heap) *dependences = NULL;
1450 struct mem_ref_group *gr;
1451 struct mem_ref *ref, *refb;
1452 VEC (loop_p, heap) *vloops = NULL;
1453 unsigned *loop_data_size;
1454 unsigned i, j, n;
1455 unsigned volume, dist, adist;
1456 HOST_WIDE_INT vol;
1457 data_reference_p dr;
1458 ddr_p dep;
1459
1460 if (loop->inner)
1461 return;
1462
1463 /* Find the outermost loop of the loop nest of LOOP (we require that
1464 there are no sibling loops inside the nest). */
1465 nest = loop;
1466 while (1)
1467 {
1468 aloop = loop_outer (nest);
1469
1470 if (aloop == current_loops->tree_root
1471 || aloop->inner->next)
1472 break;
1473
1474 nest = aloop;
1475 }
1476
1477 /* For each loop, determine the amount of data accessed in each iteration.
1478 We use this to estimate whether the reference is evicted from the
1479 cache before its reuse. */
1480 find_loop_nest (nest, &vloops);
1481 n = VEC_length (loop_p, vloops);
1482 loop_data_size = XNEWVEC (unsigned, n);
1483 volume = volume_of_references (refs);
1484 i = n;
1485 while (i-- != 0)
1486 {
1487 loop_data_size[i] = volume;
1488 /* Bound the volume by the L2 cache size, since above this bound,
1489 all dependence distances are equivalent. */
1490 if (volume > L2_CACHE_SIZE_BYTES)
1491 continue;
1492
1493 aloop = VEC_index (loop_p, vloops, i);
1494 vol = estimated_loop_iterations_int (aloop, false);
1495 if (vol < 0)
1496 vol = expected_loop_iterations (aloop);
1497 volume *= vol;
1498 }
1499
1500 /* Prepare the references in the form suitable for data dependence
1501 analysis. We ignore unanalyzable data references (the results
1502 are used just as a heuristics to estimate temporality of the
1503 references, hence we do not need to worry about correctness). */
1504 for (gr = refs; gr; gr = gr->next)
1505 for (ref = gr->refs; ref; ref = ref->next)
1506 {
1507 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1508
1509 if (dr)
1510 {
1511 ref->reuse_distance = volume;
1512 dr->aux = ref;
1513 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1514 }
1515 else
1516 no_other_refs = false;
1517 }
1518
1519 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1520 {
1521 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1522 ref = (struct mem_ref *) dr->aux;
1523 if (ref->reuse_distance > dist)
1524 ref->reuse_distance = dist;
1525
1526 if (no_other_refs)
1527 ref->independent_p = true;
1528 }
1529
1530 compute_all_dependences (datarefs, &dependences, vloops, true);
1531
1532 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1533 {
1534 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1535 continue;
1536
1537 ref = (struct mem_ref *) DDR_A (dep)->aux;
1538 refb = (struct mem_ref *) DDR_B (dep)->aux;
1539
1540 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1541 || DDR_NUM_DIST_VECTS (dep) == 0)
1542 {
1543 /* If the dependence cannot be analyzed, assume that there might be
1544 a reuse. */
1545 dist = 0;
1546
1547 ref->independent_p = false;
1548 refb->independent_p = false;
1549 }
1550 else
1551 {
1552 /* The distance vectors are normalized to be always lexicographically
1553 positive, hence we cannot tell just from them whether DDR_A comes
1554 before DDR_B or vice versa. However, it is not important,
1555 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1556 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1557 in cache (and marking it as nontemporal would not affect
1558 anything). */
1559
1560 dist = volume;
1561 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1562 {
1563 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1564 loop_data_size, n);
1565
1566 /* If this is a dependence in the innermost loop (i.e., the
1567 distances in all superloops are zero) and it is not
1568 the trivial self-dependence with distance zero, record that
1569 the references are not completely independent. */
1570 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1571 && (ref != refb
1572 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1573 {
1574 ref->independent_p = false;
1575 refb->independent_p = false;
1576 }
1577
1578 /* Ignore accesses closer than
1579 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1580 so that we use nontemporal prefetches e.g. if single memory
1581 location is accessed several times in a single iteration of
1582 the loop. */
1583 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1584 continue;
1585
1586 if (adist < dist)
1587 dist = adist;
1588 }
1589 }
1590
1591 if (ref->reuse_distance > dist)
1592 ref->reuse_distance = dist;
1593 if (refb->reuse_distance > dist)
1594 refb->reuse_distance = dist;
1595 }
1596
1597 free_dependence_relations (dependences);
1598 free_data_refs (datarefs);
1599 free (loop_data_size);
1600
1601 if (dump_file && (dump_flags & TDF_DETAILS))
1602 {
1603 fprintf (dump_file, "Reuse distances:\n");
1604 for (gr = refs; gr; gr = gr->next)
1605 for (ref = gr->refs; ref; ref = ref->next)
1606 fprintf (dump_file, " ref %p distance %u\n",
1607 (void *) ref, ref->reuse_distance);
1608 }
1609 }
1610
1611 /* Do a cost-benefit analysis to determine if prefetching is profitable
1612 for the current loop given the following parameters:
1613 AHEAD: the iteration ahead distance,
1614 EST_NITER: the estimated trip count,
1615 NINSNS: estimated number of instructions in the loop,
1616 PREFETCH_COUNT: an estimate of the number of prefetches,
1617 MEM_REF_COUNT: total number of memory references in the loop, and UNROLL_FACTOR: the factor by which the loop will be unrolled. */
1618
1619 static bool
1620 is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
1621 unsigned ninsns, unsigned prefetch_count,
1622 unsigned mem_ref_count, unsigned unroll_factor)
1623 {
1624 int insn_to_mem_ratio, insn_to_prefetch_ratio;
1625
1626 if (mem_ref_count == 0)
1627 return false;
1628
1629 /* Prefetching improves performance by overlapping cache missing
1630 memory accesses with CPU operations. If the loop does not have
1631 enough CPU operations to overlap with memory operations, prefetching
1632 won't give a significant benefit. One approximate way of checking
1633 this is to require the ratio of instructions to memory references to
1634 be above a certain limit. This approximation works well in practice.
1635 TODO: Implement a more precise computation by estimating the time
1636 for each CPU or memory op in the loop. Time estimates for memory ops
1637 should account for cache misses. */
1638 insn_to_mem_ratio = ninsns / mem_ref_count;
1639
1640 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1641 {
1642 if (dump_file && (dump_flags & TDF_DETAILS))
1643 fprintf (dump_file,
1644 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1645 insn_to_mem_ratio);
1646 return false;
1647 }
1648
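/* Illustrative numbers only: a loop with ninsns = 40 and mem_ref_count = 4
   has insn_to_mem_ratio = 10; if it is unrolled 4 times and issues
   prefetch_count = 8 prefetches, the insn_to_prefetch_ratio computed
   below is (4 * 40) / 8 = 20.  */
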
1649 /* Prefetching most likely causes performance degradation when the instruction
1650 to prefetch ratio is too small. Too many prefetch instructions in a loop
1651 may reduce the I-cache performance.
1652 (unroll_factor * ninsns) is used to estimate the number of instructions in
1653 the unrolled loop. This implementation is a bit simplistic -- the number
1654 of issued prefetch instructions is also affected by unrolling. So,
1655 prefetch_mod and the unroll factor should be taken into account when
1656 determining prefetch_count. Also, the number of insns of the unrolled
1657 loop will usually be significantly smaller than the number of insns of the
1658 original loop * unroll_factor (at least the induction variable increases
1659 and the exit branches will get eliminated), so it might be better to use
1660 tree_estimate_loop_size + estimated_unrolled_size. */
1661 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1662 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1663 {
1664 if (dump_file && (dump_flags & TDF_DETAILS))
1665 fprintf (dump_file,
1666 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1667 insn_to_prefetch_ratio);
1668 return false;
1669 }
1670
1671 /* We cannot do further estimation if the trip count is unknown. Just assume
1672 prefetching is profitable. Too aggressive??? */
1673 if (est_niter < 0)
1674 return true;
1675
1676 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1677 {
1678 if (dump_file && (dump_flags & TDF_DETAILS))
1679 fprintf (dump_file,
1680 "Not prefetching -- loop estimated to roll only %d times\n",
1681 (int) est_niter);
1682 return false;
1683 }
1684 return true;
1685 }
1686
1687
1688 /* Issue prefetch instructions for array references in LOOP. Returns
1689 true if the LOOP was unrolled. */
1690
1691 static bool
1692 loop_prefetch_arrays (struct loop *loop)
1693 {
1694 struct mem_ref_group *refs;
1695 unsigned ahead, ninsns, time, unroll_factor;
1696 HOST_WIDE_INT est_niter;
1697 struct tree_niter_desc desc;
1698 bool unrolled = false, no_other_refs;
1699 unsigned prefetch_count;
1700 unsigned mem_ref_count;
1701
1702 if (optimize_loop_nest_for_size_p (loop))
1703 {
1704 if (dump_file && (dump_flags & TDF_DETAILS))
1705 fprintf (dump_file, " ignored (cold area)\n");
1706 return false;
1707 }
1708
1709 /* Step 1: gather the memory references. */
1710 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1711
1712 /* Step 2: estimate the reuse effects. */
1713 prune_by_reuse (refs);
1714
1715 prefetch_count = estimate_prefetch_count (refs);
1716 if (prefetch_count == 0)
1717 goto fail;
1718
1719 determine_loop_nest_reuse (loop, refs, no_other_refs);
1720
1721 /* Step 3: determine the ahead and unroll factor. */
1722
1723 /* FIXME: the time should be weighted by the probabilities of the blocks in
1724 the loop body. */
1725 time = tree_num_loop_insns (loop, &eni_time_weights);
1726 ahead = (PREFETCH_LATENCY + time - 1) / time;
1727 est_niter = estimated_loop_iterations_int (loop, false);
1728
1729 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1730 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1731 est_niter);
1732 if (dump_file && (dump_flags & TDF_DETAILS))
1733 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1734 HOST_WIDE_INT_PRINT_DEC "\n"
1735 "insn count %d, mem ref count %d, prefetch count %d\n",
1736 ahead, unroll_factor, est_niter,
1737 ninsns, mem_ref_count, prefetch_count);
1738
1739 if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
1740 mem_ref_count, unroll_factor))
1741 goto fail;
1742
1743 mark_nontemporal_stores (loop, refs);
1744
1745 /* Step 4: what to prefetch? */
1746 if (!schedule_prefetches (refs, unroll_factor, ahead))
1747 goto fail;
1748
1749 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1750 iterations so that we do not issue superfluous prefetches. */
1751 if (unroll_factor != 1)
1752 {
1753 tree_unroll_loop (loop, unroll_factor,
1754 single_dom_exit (loop), &desc);
1755 unrolled = true;
1756 }
1757
1758 /* Step 6: issue the prefetches. */
1759 issue_prefetches (refs, unroll_factor, ahead);
1760
1761 fail:
1762 release_mem_refs (refs);
1763 return unrolled;
1764 }
1765
1766 /* Issue prefetch instructions for array references in loops. */
1767
1768 unsigned int
1769 tree_ssa_prefetch_arrays (void)
1770 {
1771 loop_iterator li;
1772 struct loop *loop;
1773 bool unrolled = false;
1774 int todo_flags = 0;
1775
1776 if (!HAVE_prefetch
1777 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1778 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1779 of processor costs and i486 does not have prefetch, but
1780 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1781 || PREFETCH_BLOCK == 0)
1782 return 0;
1783
1784 if (dump_file && (dump_flags & TDF_DETAILS))
1785 {
1786 fprintf (dump_file, "Prefetching parameters:\n");
1787 fprintf (dump_file, " simultaneous prefetches: %d\n",
1788 SIMULTANEOUS_PREFETCHES);
1789 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1790 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1791 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1792 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1793 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1794 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1795 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1796 MIN_INSN_TO_PREFETCH_RATIO);
1797 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1798 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1799 fprintf (dump_file, "\n");
1800 }
1801
1802 initialize_original_copy_tables ();
1803
1804 if (!built_in_decls[BUILT_IN_PREFETCH])
1805 {
1806 tree type = build_function_type (void_type_node,
1807 tree_cons (NULL_TREE,
1808 const_ptr_type_node,
1809 NULL_TREE));
1810 tree decl = add_builtin_function ("__builtin_prefetch", type,
1811 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1812 NULL, NULL_TREE);
1813 DECL_IS_NOVOPS (decl) = true;
1814 built_in_decls[BUILT_IN_PREFETCH] = decl;
1815 }
1816
1817 /* We assume that the size of a cache line is a power of two, so verify this
1818 here. */
1819 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1820
1821 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1822 {
1823 if (dump_file && (dump_flags & TDF_DETAILS))
1824 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1825
1826 unrolled |= loop_prefetch_arrays (loop);
1827
1828 if (dump_file && (dump_flags & TDF_DETAILS))
1829 fprintf (dump_file, "\n\n");
1830 }
1831
1832 if (unrolled)
1833 {
1834 scev_reset ();
1835 todo_flags |= TODO_cleanup_cfg;
1836 }
1837
1838 free_original_copy_tables ();
1839 return todo_flags;
1840 }