1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "expr.h"
36 #include "tree-pass.h"
37 #include "ggc.h"
38 #include "insn-config.h"
39 #include "recog.h"
40 #include "hashtab.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "toplev.h"
44 #include "params.h"
45 #include "langhooks.h"
46 #include "tree-inline.h"
47 #include "tree-data-ref.h"
48 #include "optabs.h"
49
50 /* This pass inserts prefetch instructions to optimize cache usage during
51 accesses to arrays in loops. It processes loops sequentially and:
52
53 1) Gathers all memory references in the single loop.
54 2) For each of the references it decides when it is profitable to prefetch
 55 it. To do this, we evaluate the reuse among the accesses and determine
56 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
57 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
58 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
59 iterations of the loop that are zero modulo PREFETCH_MOD). For example
60 (assuming cache line size is 64 bytes, char has size 1 byte and there
61 is no hardware sequential prefetch):
62
63 char *a;
64 for (i = 0; i < max; i++)
65 {
66 a[255] = ...; (0)
67 a[i] = ...; (1)
68 a[i + 64] = ...; (2)
69 a[16*i] = ...; (3)
70 a[187*i] = ...; (4)
71 a[187*i + 50] = ...; (5)
72 }
73
74 (0) obviously has PREFETCH_BEFORE 1
75 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
76 location 64 iterations before it, and PREFETCH_MOD 64 (since
77 it hits the same cache line otherwise).
78 (2) has PREFETCH_MOD 64
79 (3) has PREFETCH_MOD 4
80 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
 81 the cache line accessed by (4) is the same as the one accessed by (5)
 82 with probability only 7/32.
83 (5) has PREFETCH_MOD 1 as well.
84
85 Additionally, we use data dependence analysis to determine for each
86 reference the distance till the first reuse; this information is used
87 to determine the temporality of the issued prefetch instruction.
88
89 3) We determine how much ahead we need to prefetch. The number of
90 iterations needed is time to fetch / time spent in one iteration of
91 the loop. The problem is that we do not know either of these values,
92 so we just make a heuristic guess based on a magic (possibly)
93 target-specific constant and size of the loop.
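
     For instance, with a (hypothetical) prefetch latency of 200 time units
     and a loop body costing about 20 units per iteration, we would prefetch
     roughly 200 / 20 = 10 iterations ahead.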
94
95 4) Determine which of the references we prefetch. We take into account
96 that there is a maximum number of simultaneous prefetches (provided
97 by machine description). We prefetch as many prefetches as possible
98 while still within this bound (starting with those with lowest
99 prefetch_mod, since they are responsible for most of the cache
100 misses).
101
102 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
103 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
104 prefetching nonaccessed memory.
105 TODO -- actually implement peeling.
106
107 6) We actually emit the prefetch instructions. ??? Perhaps emit the
108 prefetch instructions with guards in cases where 5) was not sufficient
109 to satisfy the constraints?
110
111 The function is_loop_prefetching_profitable() implements a cost model
112 to determine if prefetching is profitable for a given loop. The cost
 113 model has two heuristics:
114 1. A heuristic that determines whether the given loop has enough CPU
115 ops that can be overlapped with cache missing memory ops.
116 If not, the loop won't benefit from prefetching. This is implemented
 117 by requiring the ratio between the instruction count and the mem ref
118 count to be above a certain minimum.
119 2. A heuristic that disables prefetching in a loop with an unknown trip
120 count if the prefetching cost is above a certain limit. The relative
121 prefetching cost is estimated by taking the ratio between the
 122 prefetch count and the total instruction count (this models the I-cache
123 cost).
124 The limits used in these heuristics are defined as parameters with
125 reasonable default values. Machine-specific default values will be
126 added later.
127
128 Some other TODO:
129 -- write and use more general reuse analysis (that could be also used
130 in other cache aimed loop optimizations)
 131 -- make it behave sanely together with the prefetches given by the user
 132 (now we just ignore them; at the very least we should avoid
 133 optimizing loops in which the user put his own prefetches)
134 -- we assume cache line size alignment of arrays; this could be
135 improved. */
136
137 /* Magic constants follow. These should be replaced by machine specific
138 numbers. */
139
140 /* True if write can be prefetched by a read prefetch. */
141
142 #ifndef WRITE_CAN_USE_READ_PREFETCH
143 #define WRITE_CAN_USE_READ_PREFETCH 1
144 #endif
145
146 /* True if read can be prefetched by a write prefetch. */
147
148 #ifndef READ_CAN_USE_WRITE_PREFETCH
149 #define READ_CAN_USE_WRITE_PREFETCH 0
150 #endif
151
152 /* The size of the block loaded by a single prefetch. Usually, this is
153 the same as cache line size (at the moment, we only consider one level
154 of cache hierarchy). */
155
156 #ifndef PREFETCH_BLOCK
157 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
158 #endif
159
 160 /* Do we have forward hardware sequential prefetching? */
161
162 #ifndef HAVE_FORWARD_PREFETCH
163 #define HAVE_FORWARD_PREFETCH 0
164 #endif
165
 166 /* Do we have backward hardware sequential prefetching? */
167
168 #ifndef HAVE_BACKWARD_PREFETCH
169 #define HAVE_BACKWARD_PREFETCH 0
170 #endif
171
172 /* In some cases we are only able to determine that there is a certain
173 probability that the two accesses hit the same cache line. In this
174 case, we issue the prefetches for both of them if this probability
 175 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
176
177 #ifndef ACCEPTABLE_MISS_RATE
178 #define ACCEPTABLE_MISS_RATE 50
179 #endif
180
181 #ifndef HAVE_prefetch
182 #define HAVE_prefetch 0
183 #endif
184
185 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
186 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
187
188 /* We consider a memory access nontemporal if it is not reused sooner than
189 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
190 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
 191 so that we use nontemporal prefetches e.g. if a single memory location
192 is accessed several times in a single iteration of the loop. */
193 #define NONTEMPORAL_FRACTION 16
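
/* For instance, with a (hypothetical) 32 kB L1 cache, reuses closer than
   32768 / 16 = 2048 bytes apart are ignored when computing the reuse
   distance, so they do not by themselves prevent a nontemporal prefetch. */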
194
195 /* In case we have to emit a memory fence instruction after the loop that
196 uses nontemporal stores, this defines the builtin to use. */
197
198 #ifndef FENCE_FOLLOWING_MOVNT
199 #define FENCE_FOLLOWING_MOVNT NULL_TREE
200 #endif
201
202 /* It is not profitable to prefetch when the trip count is not at
203 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
204 For example, in a loop with a prefetch ahead distance of 10,
205 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
206 profitable to prefetch when the trip count is greater or equal to
207 40. In that case, 30 out of the 40 iterations will benefit from
208 prefetching. */
209
210 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
211 #define TRIP_COUNT_TO_AHEAD_RATIO 4
212 #endif
213
 214 /* The group of references between which reuse may occur. */
215
216 struct mem_ref_group
217 {
218 tree base; /* Base of the reference. */
219 HOST_WIDE_INT step; /* Step of the reference. */
220 struct mem_ref *refs; /* References in the group. */
221 struct mem_ref_group *next; /* Next group of references. */
222 };
223
224 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
225
226 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
227
228 /* The memory reference. */
229
230 struct mem_ref
231 {
 232 gimple stmt; /* Statement in which the reference appears. */
233 tree mem; /* The reference. */
234 HOST_WIDE_INT delta; /* Constant offset of the reference. */
235 struct mem_ref_group *group; /* The group of references it belongs to. */
236 unsigned HOST_WIDE_INT prefetch_mod;
237 /* Prefetch only each PREFETCH_MOD-th
238 iteration. */
239 unsigned HOST_WIDE_INT prefetch_before;
240 /* Prefetch only first PREFETCH_BEFORE
241 iterations. */
242 unsigned reuse_distance; /* The amount of data accessed before the first
243 reuse of this value. */
244 struct mem_ref *next; /* The next reference in the group. */
245 unsigned write_p : 1; /* Is it a write? */
 246 unsigned independent_p : 1; /* True if the reference is independent of
 247 all other references inside the loop. */
248 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
249 unsigned storent_p : 1; /* True if we changed the store to a
250 nontemporal one. */
251 };
252
253 /* Dumps information about reference REF to FILE. */
254
255 static void
256 dump_mem_ref (FILE *file, struct mem_ref *ref)
257 {
258 fprintf (file, "Reference %p:\n", (void *) ref);
259
260 fprintf (file, " group %p (base ", (void *) ref->group);
261 print_generic_expr (file, ref->group->base, TDF_SLIM);
262 fprintf (file, ", step ");
263 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
264 fprintf (file, ")\n");
265
266 fprintf (file, " delta ");
267 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
268 fprintf (file, "\n");
269
270 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
271
272 fprintf (file, "\n");
273 }
274
275 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
276 exist. */
277
278 static struct mem_ref_group *
279 find_or_create_group (struct mem_ref_group **groups, tree base,
280 HOST_WIDE_INT step)
281 {
282 struct mem_ref_group *group;
283
284 for (; *groups; groups = &(*groups)->next)
285 {
286 if ((*groups)->step == step
287 && operand_equal_p ((*groups)->base, base, 0))
288 return *groups;
289
290 /* Keep the list of groups sorted by decreasing step. */
291 if ((*groups)->step < step)
292 break;
293 }
294
295 group = XNEW (struct mem_ref_group);
296 group->base = base;
297 group->step = step;
298 group->refs = NULL;
299 group->next = *groups;
300 *groups = group;
301
302 return group;
303 }
304
305 /* Records a memory reference MEM in GROUP with offset DELTA and write status
306 WRITE_P. The reference occurs in statement STMT. */
307
308 static void
309 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
310 HOST_WIDE_INT delta, bool write_p)
311 {
312 struct mem_ref **aref;
313
314 /* Do not record the same address twice. */
315 for (aref = &group->refs; *aref; aref = &(*aref)->next)
316 {
 317 /* It need not be possible for a write reference to reuse a read
 318 prefetch, or vice versa. */
319 if (!WRITE_CAN_USE_READ_PREFETCH
320 && write_p
321 && !(*aref)->write_p)
322 continue;
323 if (!READ_CAN_USE_WRITE_PREFETCH
324 && !write_p
325 && (*aref)->write_p)
326 continue;
327
328 if ((*aref)->delta == delta)
329 return;
330 }
331
332 (*aref) = XNEW (struct mem_ref);
333 (*aref)->stmt = stmt;
334 (*aref)->mem = mem;
335 (*aref)->delta = delta;
336 (*aref)->write_p = write_p;
337 (*aref)->prefetch_before = PREFETCH_ALL;
338 (*aref)->prefetch_mod = 1;
339 (*aref)->reuse_distance = 0;
340 (*aref)->issue_prefetch_p = false;
341 (*aref)->group = group;
342 (*aref)->next = NULL;
343 (*aref)->independent_p = false;
344 (*aref)->storent_p = false;
345
346 if (dump_file && (dump_flags & TDF_DETAILS))
347 dump_mem_ref (dump_file, *aref);
348 }
349
350 /* Release memory references in GROUPS. */
351
352 static void
353 release_mem_refs (struct mem_ref_group *groups)
354 {
355 struct mem_ref_group *next_g;
356 struct mem_ref *ref, *next_r;
357
358 for (; groups; groups = next_g)
359 {
360 next_g = groups->next;
361 for (ref = groups->refs; ref; ref = next_r)
362 {
363 next_r = ref->next;
364 free (ref);
365 }
366 free (groups);
367 }
368 }
369
370 /* A structure used to pass arguments to idx_analyze_ref. */
371
372 struct ar_data
373 {
374 struct loop *loop; /* Loop of the reference. */
375 gimple stmt; /* Statement of the reference. */
376 HOST_WIDE_INT *step; /* Step of the memory reference. */
377 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
378 };
379
380 /* Analyzes a single INDEX of a memory reference to obtain information
381 described at analyze_ref. Callback for for_each_index. */
382
383 static bool
384 idx_analyze_ref (tree base, tree *index, void *data)
385 {
386 struct ar_data *ar_data = (struct ar_data *) data;
387 tree ibase, step, stepsize;
388 HOST_WIDE_INT istep, idelta = 0, imult = 1;
389 affine_iv iv;
390
391 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
392 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
393 return false;
394
395 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
396 *index, &iv, false))
397 return false;
398 ibase = iv.base;
399 step = iv.step;
400
401 if (!cst_and_fits_in_hwi (step))
402 return false;
403 istep = int_cst_value (step);
404
405 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
406 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
407 {
408 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
409 ibase = TREE_OPERAND (ibase, 0);
410 }
411 if (cst_and_fits_in_hwi (ibase))
412 {
413 idelta += int_cst_value (ibase);
414 ibase = build_int_cst (TREE_TYPE (ibase), 0);
415 }
416
417 if (TREE_CODE (base) == ARRAY_REF)
418 {
419 stepsize = array_ref_element_size (base);
420 if (!cst_and_fits_in_hwi (stepsize))
421 return false;
422 imult = int_cst_value (stepsize);
423
424 istep *= imult;
425 idelta *= imult;
426 }
427
428 *ar_data->step += istep;
429 *ar_data->delta += idelta;
430 *index = ibase;
431
432 return true;
433 }
434
 435 /* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
 436 and STEP are integer constants and iter is the number of iterations of LOOP. The
437 reference occurs in statement STMT. Strips nonaddressable component
438 references from REF_P. */
439
440 static bool
441 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
442 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
443 gimple stmt)
444 {
445 struct ar_data ar_data;
446 tree off;
447 HOST_WIDE_INT bit_offset;
448 tree ref = *ref_p;
449
450 *step = 0;
451 *delta = 0;
452
453 /* First strip off the component references. Ignore bitfields. */
454 if (TREE_CODE (ref) == COMPONENT_REF
455 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
456 ref = TREE_OPERAND (ref, 0);
457
458 *ref_p = ref;
459
460 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
461 {
462 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
463 bit_offset = TREE_INT_CST_LOW (off);
464 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
465
466 *delta += bit_offset / BITS_PER_UNIT;
467 }
468
469 *base = unshare_expr (ref);
470 ar_data.loop = loop;
471 ar_data.stmt = stmt;
472 ar_data.step = step;
473 ar_data.delta = delta;
474 return for_each_index (base, idx_analyze_ref, &ar_data);
475 }
476
477 /* Record a memory reference REF to the list REFS. The reference occurs in
478 LOOP in statement STMT and it is write if WRITE_P. Returns true if the
479 reference was recorded, false otherwise. */
480
481 static bool
482 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
483 tree ref, bool write_p, gimple stmt)
484 {
485 tree base;
486 HOST_WIDE_INT step, delta;
487 struct mem_ref_group *agrp;
488
489 if (get_base_address (ref) == NULL)
490 return false;
491
492 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
493 return false;
494
495 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
496 are integer constants. */
497 agrp = find_or_create_group (refs, base, step);
498 record_ref (agrp, stmt, ref, delta, write_p);
499
500 return true;
501 }
502
503 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
504 true if there are no other memory references inside the loop. */
505
506 static struct mem_ref_group *
507 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
508 {
509 basic_block *body = get_loop_body_in_dom_order (loop);
510 basic_block bb;
511 unsigned i;
512 gimple_stmt_iterator bsi;
513 gimple stmt;
514 tree lhs, rhs;
515 struct mem_ref_group *refs = NULL;
516
517 *no_other_refs = true;
518 *ref_count = 0;
519
 520 /* Scan the loop body in order, so that earlier references precede the
 521 later ones. */
522 for (i = 0; i < loop->num_nodes; i++)
523 {
524 bb = body[i];
525 if (bb->loop_father != loop)
526 continue;
527
528 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
529 {
530 stmt = gsi_stmt (bsi);
531
532 if (gimple_code (stmt) != GIMPLE_ASSIGN)
533 {
534 if (gimple_vuse (stmt)
535 || (is_gimple_call (stmt)
536 && !(gimple_call_flags (stmt) & ECF_CONST)))
537 *no_other_refs = false;
538 continue;
539 }
540
541 lhs = gimple_assign_lhs (stmt);
542 rhs = gimple_assign_rhs1 (stmt);
543
544 if (REFERENCE_CLASS_P (rhs))
545 {
546 *no_other_refs &= gather_memory_references_ref (loop, &refs,
547 rhs, false, stmt);
548 *ref_count += 1;
549 }
550 if (REFERENCE_CLASS_P (lhs))
551 {
552 *no_other_refs &= gather_memory_references_ref (loop, &refs,
553 lhs, true, stmt);
554 *ref_count += 1;
555 }
556 }
557 }
558 free (body);
559
560 return refs;
561 }
562
563 /* Prune the prefetch candidate REF using the self-reuse. */
564
565 static void
566 prune_ref_by_self_reuse (struct mem_ref *ref)
567 {
568 HOST_WIDE_INT step = ref->group->step;
569 bool backward = step < 0;
570
571 if (step == 0)
572 {
573 /* Prefetch references to invariant address just once. */
574 ref->prefetch_before = 1;
575 return;
576 }
577
578 if (backward)
579 step = -step;
580
581 if (step > PREFETCH_BLOCK)
582 return;
583
584 if ((backward && HAVE_BACKWARD_PREFETCH)
585 || (!backward && HAVE_FORWARD_PREFETCH))
586 {
587 ref->prefetch_before = 1;
588 return;
589 }
590
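  /* Sequential accesses with step not larger than PREFETCH_BLOCK hit a new
     cache line only every PREFETCH_BLOCK / step iterations, so prefetching
     more often would be redundant. For example (illustrative numbers), a
     64-byte PREFETCH_BLOCK and a 4-byte step give PREFETCH_MOD 16: one
     prefetch covers 16 consecutive iterations. */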
591 ref->prefetch_mod = PREFETCH_BLOCK / step;
592 }
593
594 /* Divides X by BY, rounding down. */
595
596 static HOST_WIDE_INT
597 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
598 {
599 gcc_assert (by > 0);
600
601 if (x >= 0)
602 return x / by;
603 else
604 return (x + by - 1) / by;
605 }
606
607 /* Given a CACHE_LINE_SIZE and two inductive memory references
608 with a common STEP greater than CACHE_LINE_SIZE and an address
609 difference DELTA, compute the probability that they will fall
610 in different cache lines. DISTINCT_ITERS is the number of
611 distinct iterations after which the pattern repeats itself.
612 ALIGN_UNIT is the unit of alignment in bytes. */
613
614 static int
615 compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
616 HOST_WIDE_INT step, HOST_WIDE_INT delta,
617 unsigned HOST_WIDE_INT distinct_iters,
618 int align_unit)
619 {
620 unsigned align, iter;
621 int total_positions, miss_positions, miss_rate;
622 int address1, address2, cache_line1, cache_line2;
623
624 total_positions = 0;
625 miss_positions = 0;
626
627 /* Iterate through all possible alignments of the first
628 memory reference within its cache line. */
629 for (align = 0; align < cache_line_size; align += align_unit)
630
631 /* Iterate through all distinct iterations. */
632 for (iter = 0; iter < distinct_iters; iter++)
633 {
634 address1 = align + step * iter;
635 address2 = address1 + delta;
636 cache_line1 = address1 / cache_line_size;
637 cache_line2 = address2 / cache_line_size;
638 total_positions += 1;
639 if (cache_line1 != cache_line2)
640 miss_positions += 1;
641 }
642 miss_rate = 1000 * miss_positions / total_positions;
643 return miss_rate;
644 }
645
646 /* Prune the prefetch candidate REF using the reuse with BY.
647 If BY_IS_BEFORE is true, BY is before REF in the loop. */
648
649 static void
650 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
651 bool by_is_before)
652 {
653 HOST_WIDE_INT step = ref->group->step;
654 bool backward = step < 0;
655 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
656 HOST_WIDE_INT delta = delta_b - delta_r;
657 HOST_WIDE_INT hit_from;
658 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
659 int miss_rate;
660 HOST_WIDE_INT reduced_step;
661 unsigned HOST_WIDE_INT reduced_prefetch_block;
662 tree ref_type;
663 int align_unit;
664
665 if (delta == 0)
666 {
 667 /* If the references have the same address, only prefetch the
668 former. */
669 if (by_is_before)
670 ref->prefetch_before = 0;
671
672 return;
673 }
674
675 if (!step)
676 {
677 /* If the reference addresses are invariant and fall into the
678 same cache line, prefetch just the first one. */
679 if (!by_is_before)
680 return;
681
682 if (ddown (ref->delta, PREFETCH_BLOCK)
683 != ddown (by->delta, PREFETCH_BLOCK))
684 return;
685
686 ref->prefetch_before = 0;
687 return;
688 }
689
690 /* Only prune the reference that is behind in the array. */
691 if (backward)
692 {
693 if (delta > 0)
694 return;
695
696 /* Transform the data so that we may assume that the accesses
697 are forward. */
698 delta = - delta;
699 step = -step;
700 delta_r = PREFETCH_BLOCK - 1 - delta_r;
701 delta_b = PREFETCH_BLOCK - 1 - delta_b;
702 }
703 else
704 {
705 if (delta < 0)
706 return;
707 }
708
709 /* Check whether the two references are likely to hit the same cache
 710 line, and how far apart from each other are the iterations in which
 711 this occurs. */
712
713 if (step <= PREFETCH_BLOCK)
714 {
715 /* The accesses are sure to meet. Let us check when. */
716 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
717 prefetch_before = (hit_from - delta_r + step - 1) / step;
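      /* For example, for references (1) a[i] and (2) a[i + 64] from the
         comment at the top of this file (step 1 byte, delta 64, 64-byte
         prefetch block), hit_from is 64 and prefetch_before becomes 64:
         (1) reuses the data that (2) accessed 64 iterations earlier. */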
718
719 /* Do not reduce prefetch_before if we meet beyond cache size. */
720 if (prefetch_before > abs (L2_CACHE_SIZE_BYTES / step))
721 prefetch_before = PREFETCH_ALL;
722 if (prefetch_before < ref->prefetch_before)
723 ref->prefetch_before = prefetch_before;
724
725 return;
726 }
727
728 /* A more complicated case with step > prefetch_block. First reduce
729 the ratio between the step and the cache line size to its simplest
730 terms. The resulting denominator will then represent the number of
731 distinct iterations after which each address will go back to its
732 initial location within the cache line. This computation assumes
733 that PREFETCH_BLOCK is a power of two. */
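  /* E.g. (illustrative): a step of 96 bytes with a 64-byte PREFETCH_BLOCK
     reduces to 3 / 2, so the pattern of offsets within the cache line repeats
     after 2 distinct iterations; an odd step such as 187 does not reduce at
     all, and the pattern repeats only after 64 iterations. */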
734 prefetch_block = PREFETCH_BLOCK;
735 reduced_prefetch_block = prefetch_block;
736 reduced_step = step;
737 while ((reduced_step & 1) == 0
738 && reduced_prefetch_block > 1)
739 {
740 reduced_step >>= 1;
741 reduced_prefetch_block >>= 1;
742 }
743
744 prefetch_before = delta / step;
745 delta %= step;
746 ref_type = TREE_TYPE (ref->mem);
747 align_unit = TYPE_ALIGN (ref_type) / 8;
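  /* Illustration (using the example from the head of this file): for
     references (4) a[187*i] and (5) a[187*i + 50], step is 187 and delta 50,
     so the two addresses share a cache line for only 14 of the 64 possible
     offsets and the computed miss rate is about 781 per thousand -- far above
     ACCEPTABLE_MISS_RATE, hence PREFETCH_BEFORE is not set. */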
748 miss_rate = compute_miss_rate(prefetch_block, step, delta,
749 reduced_prefetch_block, align_unit);
750 if (miss_rate <= ACCEPTABLE_MISS_RATE)
751 {
752 /* Do not reduce prefetch_before if we meet beyond cache size. */
753 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
754 prefetch_before = PREFETCH_ALL;
755 if (prefetch_before < ref->prefetch_before)
756 ref->prefetch_before = prefetch_before;
757
758 return;
759 }
760
761 /* Try also the following iteration. */
762 prefetch_before++;
763 delta = step - delta;
764 miss_rate = compute_miss_rate(prefetch_block, step, delta,
765 reduced_prefetch_block, align_unit);
766 if (miss_rate <= ACCEPTABLE_MISS_RATE)
767 {
768 if (prefetch_before < ref->prefetch_before)
769 ref->prefetch_before = prefetch_before;
770
771 return;
772 }
773
 774 /* REF probably does not reuse BY. */
775 return;
776 }
777
778 /* Prune the prefetch candidate REF using the reuses with other references
779 in REFS. */
780
781 static void
782 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
783 {
784 struct mem_ref *prune_by;
785 bool before = true;
786
787 prune_ref_by_self_reuse (ref);
788
789 for (prune_by = refs; prune_by; prune_by = prune_by->next)
790 {
791 if (prune_by == ref)
792 {
793 before = false;
794 continue;
795 }
796
797 if (!WRITE_CAN_USE_READ_PREFETCH
798 && ref->write_p
799 && !prune_by->write_p)
800 continue;
801 if (!READ_CAN_USE_WRITE_PREFETCH
802 && !ref->write_p
803 && prune_by->write_p)
804 continue;
805
806 prune_ref_by_group_reuse (ref, prune_by, before);
807 }
808 }
809
810 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
811
812 static void
813 prune_group_by_reuse (struct mem_ref_group *group)
814 {
815 struct mem_ref *ref_pruned;
816
817 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
818 {
819 prune_ref_by_reuse (ref_pruned, group->refs);
820
821 if (dump_file && (dump_flags & TDF_DETAILS))
822 {
823 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
824
825 if (ref_pruned->prefetch_before == PREFETCH_ALL
826 && ref_pruned->prefetch_mod == 1)
827 fprintf (dump_file, " no restrictions");
828 else if (ref_pruned->prefetch_before == 0)
829 fprintf (dump_file, " do not prefetch");
830 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
831 fprintf (dump_file, " prefetch once");
832 else
833 {
834 if (ref_pruned->prefetch_before != PREFETCH_ALL)
835 {
836 fprintf (dump_file, " prefetch before ");
837 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
838 ref_pruned->prefetch_before);
839 }
840 if (ref_pruned->prefetch_mod != 1)
841 {
842 fprintf (dump_file, " prefetch mod ");
843 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
844 ref_pruned->prefetch_mod);
845 }
846 }
847 fprintf (dump_file, "\n");
848 }
849 }
850 }
851
852 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
853
854 static void
855 prune_by_reuse (struct mem_ref_group *groups)
856 {
857 for (; groups; groups = groups->next)
858 prune_group_by_reuse (groups);
859 }
860
861 /* Returns true if we should issue prefetch for REF. */
862
863 static bool
864 should_issue_prefetch_p (struct mem_ref *ref)
865 {
 866 /* For now do not issue prefetches that are useful for only the first few
 867 iterations. */
868 if (ref->prefetch_before != PREFETCH_ALL)
869 {
870 if (dump_file && (dump_flags & TDF_DETAILS))
871 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
872 (void *) ref);
873 return false;
874 }
875
876 /* Do not prefetch nontemporal stores. */
877 if (ref->storent_p)
878 {
879 if (dump_file && (dump_flags & TDF_DETAILS))
880 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
881 return false;
882 }
883
884 return true;
885 }
886
887 /* Decide which of the prefetch candidates in GROUPS to prefetch.
888 AHEAD is the number of iterations to prefetch ahead (which corresponds
889 to the number of simultaneous instances of one prefetch running at a
 890 time). UNROLL_FACTOR is the factor by which the loop is going to be
891 unrolled. Returns true if there is anything to prefetch. */
892
893 static bool
894 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
895 unsigned ahead)
896 {
897 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
898 unsigned slots_per_prefetch;
899 struct mem_ref *ref;
900 bool any = false;
901
902 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
903 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
904
905 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
906 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
907 it will need a prefetch slot. */
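  /* For example (illustrative numbers): with AHEAD of 8 and an unroll factor
     of 4, each prefetch stays in flight for about (8 + 4/2) / 4 = 2 unrolled
     iterations, so it occupies 2 of the SIMULTANEOUS_PREFETCHES slots. */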
908 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
909 if (dump_file && (dump_flags & TDF_DETAILS))
910 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
911 slots_per_prefetch);
912
913 /* For now we just take memory references one by one and issue
914 prefetches for as many as possible. The groups are sorted
915 starting with the largest step, since the references with
916 large step are more likely to cause many cache misses. */
917
918 for (; groups; groups = groups->next)
919 for (ref = groups->refs; ref; ref = ref->next)
920 {
921 if (!should_issue_prefetch_p (ref))
922 continue;
923
924 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
925 and we unroll the loop UNROLL_FACTOR times, we need to insert
926 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
927 iteration. */
928 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
929 / ref->prefetch_mod);
930 prefetch_slots = n_prefetches * slots_per_prefetch;
931
932 /* If more than half of the prefetches would be lost anyway, do not
933 issue the prefetch. */
934 if (2 * remaining_prefetch_slots < prefetch_slots)
935 continue;
936
937 ref->issue_prefetch_p = true;
938
939 if (remaining_prefetch_slots <= prefetch_slots)
940 return true;
941 remaining_prefetch_slots -= prefetch_slots;
942 any = true;
943 }
944
945 return any;
946 }
947
948 /* Estimate the number of prefetches in the given GROUPS. */
949
950 static int
951 estimate_prefetch_count (struct mem_ref_group *groups)
952 {
953 struct mem_ref *ref;
954 int prefetch_count = 0;
955
956 for (; groups; groups = groups->next)
957 for (ref = groups->refs; ref; ref = ref->next)
958 if (should_issue_prefetch_p (ref))
959 prefetch_count++;
960
961 return prefetch_count;
962 }
963
964 /* Issue prefetches for the reference REF into loop as decided before.
 965 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
966 is the factor by which LOOP was unrolled. */
967
968 static void
969 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
970 {
971 HOST_WIDE_INT delta;
972 tree addr, addr_base, write_p, local;
973 gimple prefetch;
974 gimple_stmt_iterator bsi;
975 unsigned n_prefetches, ap;
976 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
977
978 if (dump_file && (dump_flags & TDF_DETAILS))
979 fprintf (dump_file, "Issued%s prefetch for %p.\n",
980 nontemporal ? " nontemporal" : "",
981 (void *) ref);
982
983 bsi = gsi_for_stmt (ref->stmt);
984
985 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
986 / ref->prefetch_mod);
987 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
988 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
989 true, NULL, true, GSI_SAME_STMT);
990 write_p = ref->write_p ? integer_one_node : integer_zero_node;
991 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
992
993 for (ap = 0; ap < n_prefetches; ap++)
994 {
995 /* Determine the address to prefetch. */
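      /* E.g. (illustrative): with AHEAD of 10, PREFETCH_MOD 1, a step of 4
         bytes and an unroll factor of 4, the four prefetches issued here
         target the bytes 40, 44, 48 and 52 past the current address. */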
996 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
997 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
998 addr_base, size_int (delta));
999 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1000 true, GSI_SAME_STMT);
1001
1002 /* Create the prefetch instruction. */
1003 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1004 3, addr, write_p, local);
1005 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1006 }
1007 }
1008
1009 /* Issue prefetches for the references in GROUPS into loop as decided before.
 1010 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
 1011 factor by which LOOP was unrolled. */
1012
1013 static void
1014 issue_prefetches (struct mem_ref_group *groups,
1015 unsigned unroll_factor, unsigned ahead)
1016 {
1017 struct mem_ref *ref;
1018
1019 for (; groups; groups = groups->next)
1020 for (ref = groups->refs; ref; ref = ref->next)
1021 if (ref->issue_prefetch_p)
1022 issue_prefetch_ref (ref, unroll_factor, ahead);
1023 }
1024
 1025 /* Returns true if REF is a memory write for which a nontemporal store insn
1026 can be used. */
1027
1028 static bool
1029 nontemporal_store_p (struct mem_ref *ref)
1030 {
1031 enum machine_mode mode;
1032 enum insn_code code;
1033
 1034 /* REF must be a write that is not reused. We require it to be independent
 1035 of all other memory references in the loop, as the nontemporal stores may
1036 be reordered with respect to other memory references. */
1037 if (!ref->write_p
1038 || !ref->independent_p
1039 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1040 return false;
1041
1042 /* Check that we have the storent instruction for the mode. */
1043 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1044 if (mode == BLKmode)
1045 return false;
1046
1047 code = optab_handler (storent_optab, mode)->insn_code;
1048 return code != CODE_FOR_nothing;
1049 }
1050
1051 /* If REF is a nontemporal store, we mark the corresponding modify statement
1052 and return true. Otherwise, we return false. */
1053
1054 static bool
1055 mark_nontemporal_store (struct mem_ref *ref)
1056 {
1057 if (!nontemporal_store_p (ref))
1058 return false;
1059
1060 if (dump_file && (dump_flags & TDF_DETAILS))
1061 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1062 (void *) ref);
1063
1064 gimple_assign_set_nontemporal_move (ref->stmt, true);
1065 ref->storent_p = true;
1066
1067 return true;
1068 }
1069
1070 /* Issue a memory fence instruction after LOOP. */
1071
1072 static void
1073 emit_mfence_after_loop (struct loop *loop)
1074 {
1075 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1076 edge exit;
1077 gimple call;
1078 gimple_stmt_iterator bsi;
1079 unsigned i;
1080
1081 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1082 {
1083 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1084
1085 if (!single_pred_p (exit->dest)
1086 /* If possible, we prefer not to insert the fence on other paths
1087 in cfg. */
1088 && !(exit->flags & EDGE_ABNORMAL))
1089 split_loop_exit_edge (exit);
1090 bsi = gsi_after_labels (exit->dest);
1091
1092 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1093 mark_virtual_ops_for_renaming (call);
1094 }
1095
1096 VEC_free (edge, heap, exits);
1097 update_ssa (TODO_update_ssa_only_virtuals);
1098 }
1099
 1100 /* Returns true if we can use storent in LOOP, false otherwise. */
1101
1102 static bool
1103 may_use_storent_in_loop_p (struct loop *loop)
1104 {
1105 bool ret = true;
1106
1107 if (loop->inner != NULL)
1108 return false;
1109
1110 /* If we must issue a mfence insn after using storent, check that there
1111 is a suitable place for it at each of the loop exits. */
1112 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1113 {
1114 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1115 unsigned i;
1116 edge exit;
1117
1118 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1119 if ((exit->flags & EDGE_ABNORMAL)
1120 && exit->dest == EXIT_BLOCK_PTR)
1121 ret = false;
1122
1123 VEC_free (edge, heap, exits);
1124 }
1125
1126 return ret;
1127 }
1128
1129 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1130 references in the loop. */
1131
1132 static void
1133 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1134 {
1135 struct mem_ref *ref;
1136 bool any = false;
1137
1138 if (!may_use_storent_in_loop_p (loop))
1139 return;
1140
1141 for (; groups; groups = groups->next)
1142 for (ref = groups->refs; ref; ref = ref->next)
1143 any |= mark_nontemporal_store (ref);
1144
1145 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1146 emit_mfence_after_loop (loop);
1147 }
1148
1149 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
 1150 this is the case, fill in DESC with the description of the number of
 1151 iterations. */
1152
1153 static bool
1154 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1155 unsigned factor)
1156 {
1157 if (!can_unroll_loop_p (loop, factor, desc))
1158 return false;
1159
1160 /* We only consider loops without control flow for unrolling. This is not
1161 a hard restriction -- tree_unroll_loop works with arbitrary loops
1162 as well; but the unrolling/prefetching is usually more profitable for
1163 loops consisting of a single basic block, and we want to limit the
1164 code growth. */
1165 if (loop->num_nodes > 2)
1166 return false;
1167
1168 return true;
1169 }
1170
 1171 /* Determine the coefficient by which to unroll LOOP, from the information
 1172 contained in the list of memory references REFS. The description of the
 1173 number of iterations of LOOP is stored in DESC. NINSNS is the number of
1174 insns of the LOOP. EST_NITER is the estimated number of iterations of
1175 the loop, or -1 if no estimate is available. */
1176
1177 static unsigned
1178 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1179 unsigned ninsns, struct tree_niter_desc *desc,
1180 HOST_WIDE_INT est_niter)
1181 {
1182 unsigned upper_bound;
1183 unsigned nfactor, factor, mod_constraint;
1184 struct mem_ref_group *agp;
1185 struct mem_ref *ref;
1186
1187 /* First check whether the loop is not too large to unroll. We ignore
1188 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1189 from unrolling them enough to make exactly one cache line covered by each
1190 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1191 us from unrolling the loops too many times in cases where we only expect
1192 gains from better scheduling and decreasing loop overhead, which is not
1193 the case here. */
1194 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1195
1196 /* If we unrolled the loop more times than it iterates, the unrolled version
 1197 of the loop would never be entered. */
1198 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1199 upper_bound = est_niter;
1200
1201 if (upper_bound <= 1)
1202 return 1;
1203
 1204 /* Choose the factor so that we may prefetch each cache line just once,
1205 but bound the unrolling by UPPER_BOUND. */
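  /* For instance (illustrative): with references whose PREFETCH_MOD values
     are 4 and 2, the least common multiple 4 is chosen (unless UPPER_BOUND
     forces a smaller factor), so each reference needs a whole number of
     prefetches per unrolled body -- one and two, respectively. */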
1206 factor = 1;
1207 for (agp = refs; agp; agp = agp->next)
1208 for (ref = agp->refs; ref; ref = ref->next)
1209 if (should_issue_prefetch_p (ref))
1210 {
1211 mod_constraint = ref->prefetch_mod;
1212 nfactor = least_common_multiple (mod_constraint, factor);
1213 if (nfactor <= upper_bound)
1214 factor = nfactor;
1215 }
1216
1217 if (!should_unroll_loop_p (loop, desc, factor))
1218 return 1;
1219
1220 return factor;
1221 }
1222
1223 /* Returns the total volume of the memory references REFS, taking into account
1224 reuses in the innermost loop and cache line size. TODO -- we should also
1225 take into account reuses across the iterations of the loops in the loop
1226 nest. */
1227
1228 static unsigned
1229 volume_of_references (struct mem_ref_group *refs)
1230 {
1231 unsigned volume = 0;
1232 struct mem_ref_group *gr;
1233 struct mem_ref *ref;
1234
1235 for (gr = refs; gr; gr = gr->next)
1236 for (ref = gr->refs; ref; ref = ref->next)
1237 {
1238 /* Almost always reuses another value? */
1239 if (ref->prefetch_before != PREFETCH_ALL)
1240 continue;
1241
1242 /* If several iterations access the same cache line, use the size of
1243 the line divided by this number. Otherwise, a cache line is
1244 accessed in each iteration. TODO -- in the latter case, we should
 1245 take the size of the reference into account, rounding it up to a
 1246 multiple of the cache line size. */
1247 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1248 }
1249 return volume;
1250 }
1251
1252 /* Returns the volume of memory references accessed across VEC iterations of
1253 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1254 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1255
1256 static unsigned
1257 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1258 {
1259 unsigned i;
1260
1261 for (i = 0; i < n; i++)
1262 if (vec[i] != 0)
1263 break;
1264
1265 if (i == n)
1266 return 0;
1267
1268 gcc_assert (vec[i] > 0);
1269
1270 /* We ignore the parts of the distance vector in subloops, since usually
1271 the numbers of iterations are much smaller. */
1272 return loop_sizes[i] * vec[i];
1273 }
1274
 1275 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
 1276 at the position corresponding to the loop of the step. N is the depth
 1277 of the considered loop nest, and LOOP is its innermost loop. */
1278
1279 static void
1280 add_subscript_strides (tree access_fn, unsigned stride,
1281 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1282 {
1283 struct loop *aloop;
1284 tree step;
1285 HOST_WIDE_INT astep;
1286 unsigned min_depth = loop_depth (loop) - n;
1287
1288 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1289 {
1290 aloop = get_chrec_loop (access_fn);
1291 step = CHREC_RIGHT (access_fn);
1292 access_fn = CHREC_LEFT (access_fn);
1293
1294 if ((unsigned) loop_depth (aloop) <= min_depth)
1295 continue;
1296
1297 if (host_integerp (step, 0))
1298 astep = tree_low_cst (step, 0);
1299 else
1300 astep = L1_CACHE_LINE_SIZE;
1301
1302 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1303
1304 }
1305 }
1306
1307 /* Returns the volume of memory references accessed between two consecutive
1308 self-reuses of the reference DR. We consider the subscripts of DR in N
1309 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1310 loops. LOOP is the innermost loop of the current loop nest. */
1311
1312 static unsigned
1313 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1314 struct loop *loop)
1315 {
1316 tree stride, access_fn;
1317 HOST_WIDE_INT *strides, astride;
1318 VEC (tree, heap) *access_fns;
1319 tree ref = DR_REF (dr);
1320 unsigned i, ret = ~0u;
1321
1322 /* In the following example:
1323
1324 for (i = 0; i < N; i++)
1325 for (j = 0; j < N; j++)
1326 use (a[j][i]);
1327 the same cache line is accessed each N steps (except if the change from
1328 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1329 we cannot rely purely on the results of the data dependence analysis.
1330
1331 Instead, we compute the stride of the reference in each loop, and consider
 1332 the innermost loop in which the stride is less than the cache line size. */
1333
1334 strides = XCNEWVEC (HOST_WIDE_INT, n);
1335 access_fns = DR_ACCESS_FNS (dr);
1336
1337 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1338 {
1339 /* Keep track of the reference corresponding to the subscript, so that we
1340 know its stride. */
1341 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1342 ref = TREE_OPERAND (ref, 0);
1343
1344 if (TREE_CODE (ref) == ARRAY_REF)
1345 {
1346 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1347 if (host_integerp (stride, 1))
1348 astride = tree_low_cst (stride, 1);
1349 else
1350 astride = L1_CACHE_LINE_SIZE;
1351
1352 ref = TREE_OPERAND (ref, 0);
1353 }
1354 else
1355 astride = 1;
1356
1357 add_subscript_strides (access_fn, astride, strides, n, loop);
1358 }
1359
1360 for (i = n; i-- > 0; )
1361 {
1362 unsigned HOST_WIDE_INT s;
1363
1364 s = strides[i] < 0 ? -strides[i] : strides[i];
1365
1366 if (s < (unsigned) L1_CACHE_LINE_SIZE
1367 && (loop_sizes[i]
1368 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1369 {
1370 ret = loop_sizes[i];
1371 break;
1372 }
1373 }
1374
1375 free (strides);
1376 return ret;
1377 }
1378
1379 /* Determines the distance till the first reuse of each reference in REFS
1380 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1381 memory references in the loop. */
1382
1383 static void
1384 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1385 bool no_other_refs)
1386 {
1387 struct loop *nest, *aloop;
1388 VEC (data_reference_p, heap) *datarefs = NULL;
1389 VEC (ddr_p, heap) *dependences = NULL;
1390 struct mem_ref_group *gr;
1391 struct mem_ref *ref, *refb;
1392 VEC (loop_p, heap) *vloops = NULL;
1393 unsigned *loop_data_size;
1394 unsigned i, j, n;
1395 unsigned volume, dist, adist;
1396 HOST_WIDE_INT vol;
1397 data_reference_p dr;
1398 ddr_p dep;
1399
1400 if (loop->inner)
1401 return;
1402
1403 /* Find the outermost loop of the loop nest of loop (we require that
1404 there are no sibling loops inside the nest). */
1405 nest = loop;
1406 while (1)
1407 {
1408 aloop = loop_outer (nest);
1409
1410 if (aloop == current_loops->tree_root
1411 || aloop->inner->next)
1412 break;
1413
1414 nest = aloop;
1415 }
1416
1417 /* For each loop, determine the amount of data accessed in each iteration.
1418 We use this to estimate whether the reference is evicted from the
1419 cache before its reuse. */
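  /* Illustration (made-up numbers): if the references touch 256 bytes per
     iteration of the innermost loop and that loop is expected to iterate 100
     times, one iteration of its parent loop accesses about 25600 bytes; the
     accumulation stops growing once it exceeds the L2 cache size. */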
1420 find_loop_nest (nest, &vloops);
1421 n = VEC_length (loop_p, vloops);
1422 loop_data_size = XNEWVEC (unsigned, n);
1423 volume = volume_of_references (refs);
1424 i = n;
1425 while (i-- != 0)
1426 {
1427 loop_data_size[i] = volume;
1428 /* Bound the volume by the L2 cache size, since above this bound,
1429 all dependence distances are equivalent. */
1430 if (volume > L2_CACHE_SIZE_BYTES)
1431 continue;
1432
1433 aloop = VEC_index (loop_p, vloops, i);
1434 vol = estimated_loop_iterations_int (aloop, false);
1435 if (vol < 0)
1436 vol = expected_loop_iterations (aloop);
1437 volume *= vol;
1438 }
1439
1440 /* Prepare the references in the form suitable for data dependence
1441 analysis. We ignore unanalyzable data references (the results
 1442 are used just as a heuristic to estimate the temporality of the
1443 references, hence we do not need to worry about correctness). */
1444 for (gr = refs; gr; gr = gr->next)
1445 for (ref = gr->refs; ref; ref = ref->next)
1446 {
1447 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1448
1449 if (dr)
1450 {
1451 ref->reuse_distance = volume;
1452 dr->aux = ref;
1453 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1454 }
1455 else
1456 no_other_refs = false;
1457 }
1458
1459 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1460 {
1461 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1462 ref = (struct mem_ref *) dr->aux;
1463 if (ref->reuse_distance > dist)
1464 ref->reuse_distance = dist;
1465
1466 if (no_other_refs)
1467 ref->independent_p = true;
1468 }
1469
1470 compute_all_dependences (datarefs, &dependences, vloops, true);
1471
1472 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1473 {
1474 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1475 continue;
1476
1477 ref = (struct mem_ref *) DDR_A (dep)->aux;
1478 refb = (struct mem_ref *) DDR_B (dep)->aux;
1479
1480 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1481 || DDR_NUM_DIST_VECTS (dep) == 0)
1482 {
1483 /* If the dependence cannot be analyzed, assume that there might be
1484 a reuse. */
1485 dist = 0;
1486
1487 ref->independent_p = false;
1488 refb->independent_p = false;
1489 }
1490 else
1491 {
1492 /* The distance vectors are normalized to be always lexicographically
1493 positive, hence we cannot tell just from them whether DDR_A comes
1494 before DDR_B or vice versa. However, it is not important,
1495 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1496 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1497 in cache (and marking it as nontemporal would not affect
1498 anything). */
1499
1500 dist = volume;
1501 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1502 {
1503 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1504 loop_data_size, n);
1505
1506 /* If this is a dependence in the innermost loop (i.e., the
1507 distances in all superloops are zero) and it is not
1508 the trivial self-dependence with distance zero, record that
1509 the references are not completely independent. */
1510 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1511 && (ref != refb
1512 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1513 {
1514 ref->independent_p = false;
1515 refb->independent_p = false;
1516 }
1517
1518 /* Ignore accesses closer than
1519 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1520 so that we use nontemporal prefetches e.g. if single memory
1521 location is accessed several times in a single iteration of
1522 the loop. */
1523 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1524 continue;
1525
1526 if (adist < dist)
1527 dist = adist;
1528 }
1529 }
1530
1531 if (ref->reuse_distance > dist)
1532 ref->reuse_distance = dist;
1533 if (refb->reuse_distance > dist)
1534 refb->reuse_distance = dist;
1535 }
1536
1537 free_dependence_relations (dependences);
1538 free_data_refs (datarefs);
1539 free (loop_data_size);
1540
1541 if (dump_file && (dump_flags & TDF_DETAILS))
1542 {
1543 fprintf (dump_file, "Reuse distances:\n");
1544 for (gr = refs; gr; gr = gr->next)
1545 for (ref = gr->refs; ref; ref = ref->next)
1546 fprintf (dump_file, " ref %p distance %u\n",
1547 (void *) ref, ref->reuse_distance);
1548 }
1549 }
1550
1551 /* Do a cost-benefit analysis to determine if prefetching is profitable
1552 for the current loop given the following parameters:
1553 AHEAD: the iteration ahead distance,
1554 EST_NITER: the estimated trip count,
1555 NINSNS: estimated number of instructions in the loop,
 1556 PREFETCH_COUNT: an estimate of the number of prefetches,
 1557 MEM_REF_COUNT: total number of memory references in the loop,
 UNROLL_FACTOR: the factor by which the loop will be unrolled. */
1558
1559 static bool
1560 is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
1561 unsigned ninsns, unsigned prefetch_count,
1562 unsigned mem_ref_count, unsigned unroll_factor)
1563 {
1564 int insn_to_mem_ratio, insn_to_prefetch_ratio;
1565
1566 if (mem_ref_count == 0)
1567 return false;
1568
1569 /* Prefetching improves performance by overlapping cache missing
1570 memory accesses with CPU operations. If the loop does not have
1571 enough CPU operations to overlap with memory operations, prefetching
1572 won't give a significant benefit. One approximate way of checking
1573 this is to require the ratio of instructions to memory references to
1574 be above a certain limit. This approximation works well in practice.
1575 TODO: Implement a more precise computation by estimating the time
1576 for each CPU or memory op in the loop. Time estimates for memory ops
1577 should account for cache misses. */
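  /* E.g. (illustrative): a loop with 40 instructions and 10 memory references
     has an instruction-to-memory ratio of 4, which is then compared against
     PREFETCH_MIN_INSN_TO_MEM_RATIO. */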
1578 insn_to_mem_ratio = ninsns / mem_ref_count;
1579
1580 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1581 {
1582 if (dump_file && (dump_flags & TDF_DETAILS))
1583 fprintf (dump_file,
1584 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1585 insn_to_mem_ratio);
1586 return false;
1587 }
1588
1589 /* Profitability of prefetching is highly dependent on the trip count.
1590 For a given AHEAD distance, the first AHEAD iterations do not benefit
1591 from prefetching, and the last AHEAD iterations execute useless
1592 prefetches. So, if the trip count is not large enough relative to AHEAD,
1593 prefetching may cause serious performance degradation. To avoid this
1594 problem when the trip count is not known at compile time, we
1595 conservatively skip loops with high prefetching costs. For now, only
1596 the I-cache cost is considered. The relative I-cache cost is estimated
1597 by taking the ratio between the number of prefetches and the total
1598 number of instructions. Since we are using integer arithmetic, we
1599 compute the reciprocal of this ratio.
1600 (unroll_factor * ninsns) is used to estimate the number of instructions in
1601 the unrolled loop. This implementation is a bit simplistic -- the number
1602 of issued prefetch instructions is also affected by unrolling. So,
1603 prefetch_mod and the unroll factor should be taken into account when
1604 determining prefetch_count. Also, the number of insns of the unrolled
1605 loop will usually be significantly smaller than the number of insns of the
1606 original loop * unroll_factor (at least the induction variable increases
1607 and the exit branches will get eliminated), so it might be better to use
1608 tree_estimate_loop_size + estimated_unrolled_size. */
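  /* Illustration (made-up numbers): with an unroll factor of 4, 30 insns in
     the original loop body and 8 prefetches to be issued, the ratio is
     (4 * 30) / 8 = 15 instructions per prefetch, which is then compared
     against MIN_INSN_TO_PREFETCH_RATIO. */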
1609 if (est_niter < 0)
1610 {
1611 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1612 return insn_to_prefetch_ratio >= MIN_INSN_TO_PREFETCH_RATIO;
1613 }
1614
1615 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1616 {
1617 if (dump_file && (dump_flags & TDF_DETAILS))
1618 fprintf (dump_file,
1619 "Not prefetching -- loop estimated to roll only %d times\n",
1620 (int) est_niter);
1621 return false;
1622 }
1623 return true;
1624 }
1625
1626
1627 /* Issue prefetch instructions for array references in LOOP. Returns
1628 true if the LOOP was unrolled. */
1629
1630 static bool
1631 loop_prefetch_arrays (struct loop *loop)
1632 {
1633 struct mem_ref_group *refs;
1634 unsigned ahead, ninsns, time, unroll_factor;
1635 HOST_WIDE_INT est_niter;
1636 struct tree_niter_desc desc;
1637 bool unrolled = false, no_other_refs;
1638 unsigned prefetch_count;
1639 unsigned mem_ref_count;
1640
1641 if (optimize_loop_nest_for_size_p (loop))
1642 {
1643 if (dump_file && (dump_flags & TDF_DETAILS))
1644 fprintf (dump_file, " ignored (cold area)\n");
1645 return false;
1646 }
1647
1648 /* Step 1: gather the memory references. */
1649 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1650
1651 /* Step 2: estimate the reuse effects. */
1652 prune_by_reuse (refs);
1653
1654 prefetch_count = estimate_prefetch_count (refs);
1655 if (prefetch_count == 0)
1656 goto fail;
1657
1658 determine_loop_nest_reuse (loop, refs, no_other_refs);
1659
1660 /* Step 3: determine the ahead and unroll factor. */
1661
1662 /* FIXME: the time should be weighted by the probabilities of the blocks in
1663 the loop body. */
1664 time = tree_num_loop_insns (loop, &eni_time_weights);
1665 ahead = (PREFETCH_LATENCY + time - 1) / time;
1666 est_niter = estimated_loop_iterations_int (loop, false);
1667
1668 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1669 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1670 est_niter);
1671 if (dump_file && (dump_flags & TDF_DETAILS))
1672 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1673 HOST_WIDE_INT_PRINT_DEC "\n"
1674 "insn count %d, mem ref count %d, prefetch count %d\n",
1675 ahead, unroll_factor, est_niter,
1676 ninsns, mem_ref_count, prefetch_count);
1677
1678 if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
1679 mem_ref_count, unroll_factor))
1680 goto fail;
1681
1682 mark_nontemporal_stores (loop, refs);
1683
1684 /* Step 4: what to prefetch? */
1685 if (!schedule_prefetches (refs, unroll_factor, ahead))
1686 goto fail;
1687
1688 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1689 iterations so that we do not issue superfluous prefetches. */
1690 if (unroll_factor != 1)
1691 {
1692 tree_unroll_loop (loop, unroll_factor,
1693 single_dom_exit (loop), &desc);
1694 unrolled = true;
1695 }
1696
1697 /* Step 6: issue the prefetches. */
1698 issue_prefetches (refs, unroll_factor, ahead);
1699
1700 fail:
1701 release_mem_refs (refs);
1702 return unrolled;
1703 }
1704
1705 /* Issue prefetch instructions for array references in loops. */
1706
1707 unsigned int
1708 tree_ssa_prefetch_arrays (void)
1709 {
1710 loop_iterator li;
1711 struct loop *loop;
1712 bool unrolled = false;
1713 int todo_flags = 0;
1714
1715 if (!HAVE_prefetch
 1716 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
 1717 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1718 of processor costs and i486 does not have prefetch, but
1719 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1720 || PREFETCH_BLOCK == 0)
1721 return 0;
1722
1723 if (dump_file && (dump_flags & TDF_DETAILS))
1724 {
1725 fprintf (dump_file, "Prefetching parameters:\n");
1726 fprintf (dump_file, " simultaneous prefetches: %d\n",
1727 SIMULTANEOUS_PREFETCHES);
1728 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1729 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1730 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1731 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1732 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1733 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1734 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1735 MIN_INSN_TO_PREFETCH_RATIO);
1736 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1737 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1738 fprintf (dump_file, "\n");
1739 }
1740
1741 initialize_original_copy_tables ();
1742
1743 if (!built_in_decls[BUILT_IN_PREFETCH])
1744 {
1745 tree type = build_function_type (void_type_node,
1746 tree_cons (NULL_TREE,
1747 const_ptr_type_node,
1748 NULL_TREE));
1749 tree decl = add_builtin_function ("__builtin_prefetch", type,
1750 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1751 NULL, NULL_TREE);
1752 DECL_IS_NOVOPS (decl) = true;
1753 built_in_decls[BUILT_IN_PREFETCH] = decl;
1754 }
1755
1756 /* We assume that size of cache line is a power of two, so verify this
1757 here. */
1758 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1759
1760 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1761 {
1762 if (dump_file && (dump_flags & TDF_DETAILS))
1763 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1764
1765 unrolled |= loop_prefetch_arrays (loop);
1766
1767 if (dump_file && (dump_flags & TDF_DETAILS))
1768 fprintf (dump_file, "\n\n");
1769 }
1770
1771 if (unrolled)
1772 {
1773 scev_reset ();
1774 todo_flags |= TODO_cleanup_cfg;
1775 }
1776
1777 free_original_copy_tables ();
1778 return todo_flags;
1779 }