tree-ssa-loop-prefetch.c: Fix comment at head of file.
[gcc.git] / gcc / tree-ssa-loop-prefetch.c
1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "output.h"
28 #include "tree-pretty-print.h"
29 #include "tree-flow.h"
30 #include "tree-dump.h"
31 #include "timevar.h"
32 #include "cfgloop.h"
33 #include "tree-pass.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "hashtab.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "diagnostic-core.h"
40 #include "toplev.h"
41 #include "params.h"
42 #include "langhooks.h"
43 #include "tree-inline.h"
44 #include "tree-data-ref.h"
45
46
47 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
48 between the GIMPLE and RTL worlds. */
49 #include "expr.h"
50 #include "optabs.h"
51
52 /* This pass inserts prefetch instructions to optimize cache usage during
53 accesses to arrays in loops. It processes loops sequentially and:
54
55 1) Gathers all memory references in the single loop.
56 2) For each of the references it decides when it is profitable to prefetch
57 it. To do it, we evaluate the reuse among the accesses, and determines
58 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
59 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
60 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
61 iterations of the loop that are zero modulo PREFETCH_MOD). For example
62 (assuming cache line size is 64 bytes, char has size 1 byte and there
63 is no hardware sequential prefetch):
64
65 char *a;
66 for (i = 0; i < max; i++)
67 {
68 a[255] = ...; (0)
69 a[i] = ...; (1)
70 a[i + 64] = ...; (2)
71 a[16*i] = ...; (3)
72 a[187*i] = ...; (4)
73 a[187*i + 50] = ...; (5)
74 }
75
76 (0) obviously has PREFETCH_BEFORE 1
77 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
78 location 64 iterations before it, and PREFETCH_MOD 64 (since
79 it hits the same cache line otherwise).
80 (2) has PREFETCH_MOD 64
81 (3) has PREFETCH_MOD 4
82 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
83 the cache line accessed by (5) is the same with probability only
84 7/32.
85 (5) has PREFETCH_MOD 1 as well.
86
87 Additionally, we use data dependence analysis to determine for each
88 reference the distance till the first reuse; this information is used
89 to determine the temporality of the issued prefetch instruction.
90
91 3) We determine how much ahead we need to prefetch. The number of
92 iterations needed is time to fetch / time spent in one iteration of
93 the loop. The problem is that we do not know either of these values,
94 so we just make a heuristic guess based on a magic (possibly)
95 target-specific constant and size of the loop.
96
97 4) Determine which of the references we prefetch. We take into account
98 that there is a maximum number of simultaneous prefetches (provided
99 by machine description). We prefetch as many prefetches as possible
100 while still within this bound (starting with those with lowest
101 prefetch_mod, since they are responsible for most of the cache
102 misses).
103
104 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
105 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
106 prefetching nonaccessed memory.
107 TODO -- actually implement peeling.
108
109 6) We actually emit the prefetch instructions. ??? Perhaps emit the
110 prefetch instructions with guards in cases where 5) was not sufficient
111 to satisfy the constraints?
112
113 A cost model is implemented to determine whether or not prefetching is
114 profitable for a given loop. The cost model has three heuristics:
115
116 1. Function trip_count_to_ahead_ratio_too_small_p implements a
117 heuristic that determines whether or not the loop has too few
118 iterations (compared to ahead). Prefetching is not likely to be
119 beneficial if the trip count to ahead ratio is below a certain
120 minimum.
121
122 2. Function mem_ref_count_reasonable_p implements a heuristic that
123 determines whether the given loop has enough CPU ops that can be
124 overlapped with cache missing memory ops. If not, the loop
125 won't benefit from prefetching. In the implementation,
126 prefetching is not considered beneficial if the ratio between
127 the instruction count and the mem ref count is below a certain
128 minimum.
129
130 3. Function insn_to_prefetch_ratio_too_small_p implements a
131 heuristic that disables prefetching in a loop if the prefetching
132 cost is above a certain limit. The relative prefetching cost is
133 estimated by taking the ratio between the prefetch count and the
134 total intruction count (this models the I-cache cost).
135
136 The limits used in these heuristics are defined as parameters with
137 reasonable default values. Machine-specific default values will be
138 added later.
139
140 Some other TODO:
141 -- write and use more general reuse analysis (that could be also used
142 in other cache aimed loop optimizations)
143 -- make it behave sanely together with the prefetches given by user
144 (now we just ignore them; at the very least we should avoid
145 optimizing loops in that user put his own prefetches)
146 -- we assume cache line size alignment of arrays; this could be
147 improved. */
148
149 /* Magic constants follow. These should be replaced by machine specific
150 numbers. */
151
152 /* True if write can be prefetched by a read prefetch. */
153
154 #ifndef WRITE_CAN_USE_READ_PREFETCH
155 #define WRITE_CAN_USE_READ_PREFETCH 1
156 #endif
157
158 /* True if read can be prefetched by a write prefetch. */
159
160 #ifndef READ_CAN_USE_WRITE_PREFETCH
161 #define READ_CAN_USE_WRITE_PREFETCH 0
162 #endif
163
164 /* The size of the block loaded by a single prefetch. Usually, this is
165 the same as cache line size (at the moment, we only consider one level
166 of cache hierarchy). */
167
168 #ifndef PREFETCH_BLOCK
169 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
170 #endif
171
172 /* Do we have a forward hardware sequential prefetching? */
173
174 #ifndef HAVE_FORWARD_PREFETCH
175 #define HAVE_FORWARD_PREFETCH 0
176 #endif
177
178 /* Do we have a backward hardware sequential prefetching? */
179
180 #ifndef HAVE_BACKWARD_PREFETCH
181 #define HAVE_BACKWARD_PREFETCH 0
182 #endif
183
184 /* In some cases we are only able to determine that there is a certain
185 probability that the two accesses hit the same cache line. In this
186 case, we issue the prefetches for both of them if this probability
187 is less then (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
188
189 #ifndef ACCEPTABLE_MISS_RATE
190 #define ACCEPTABLE_MISS_RATE 50
191 #endif
192
193 #ifndef HAVE_prefetch
194 #define HAVE_prefetch 0
195 #endif
196
197 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
198 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
199
200 /* We consider a memory access nontemporal if it is not reused sooner than
201 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
202 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
203 so that we use nontemporal prefetches e.g. if single memory location
204 is accessed several times in a single iteration of the loop. */
205 #define NONTEMPORAL_FRACTION 16
206
207 /* In case we have to emit a memory fence instruction after the loop that
208 uses nontemporal stores, this defines the builtin to use. */
209
210 #ifndef FENCE_FOLLOWING_MOVNT
211 #define FENCE_FOLLOWING_MOVNT NULL_TREE
212 #endif
213
214 /* It is not profitable to prefetch when the trip count is not at
215 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
216 For example, in a loop with a prefetch ahead distance of 10,
217 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
218 profitable to prefetch when the trip count is greater or equal to
219 40. In that case, 30 out of the 40 iterations will benefit from
220 prefetching. */
221
222 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
223 #define TRIP_COUNT_TO_AHEAD_RATIO 4
224 #endif
225
226 /* The group of references between that reuse may occur. */
227
228 struct mem_ref_group
229 {
230 tree base; /* Base of the reference. */
231 tree step; /* Step of the reference. */
232 struct mem_ref *refs; /* References in the group. */
233 struct mem_ref_group *next; /* Next group of references. */
234 };
235
236 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
237
238 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
239
240 /* Do not generate a prefetch if the unroll factor is significantly less
241 than what is required by the prefetch. This is to avoid redundant
242 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
243 2, prefetching requires unrolling the loop 16 times, but
244 the loop is actually unrolled twice. In this case (ratio = 8),
245 prefetching is not likely to be beneficial. */
246
247 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
248 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
249 #endif
250
251 /* Some of the prefetch computations have quadratic complexity. We want to
252 avoid huge compile times and, therefore, want to limit the amount of
253 memory references per loop where we consider prefetching. */
254
255 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
256 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
257 #endif
258
259 /* The memory reference. */
260
261 struct mem_ref
262 {
263 gimple stmt; /* Statement in that the reference appears. */
264 tree mem; /* The reference. */
265 HOST_WIDE_INT delta; /* Constant offset of the reference. */
266 struct mem_ref_group *group; /* The group of references it belongs to. */
267 unsigned HOST_WIDE_INT prefetch_mod;
268 /* Prefetch only each PREFETCH_MOD-th
269 iteration. */
270 unsigned HOST_WIDE_INT prefetch_before;
271 /* Prefetch only first PREFETCH_BEFORE
272 iterations. */
273 unsigned reuse_distance; /* The amount of data accessed before the first
274 reuse of this value. */
275 struct mem_ref *next; /* The next reference in the group. */
276 unsigned write_p : 1; /* Is it a write? */
277 unsigned independent_p : 1; /* True if the reference is independent on
278 all other references inside the loop. */
279 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
280 unsigned storent_p : 1; /* True if we changed the store to a
281 nontemporal one. */
282 };
283
284 /* Dumps information about reference REF to FILE. */
285
286 static void
287 dump_mem_ref (FILE *file, struct mem_ref *ref)
288 {
289 fprintf (file, "Reference %p:\n", (void *) ref);
290
291 fprintf (file, " group %p (base ", (void *) ref->group);
292 print_generic_expr (file, ref->group->base, TDF_SLIM);
293 fprintf (file, ", step ");
294 if (cst_and_fits_in_hwi (ref->group->step))
295 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
296 else
297 print_generic_expr (file, ref->group->step, TDF_TREE);
298 fprintf (file, ")\n");
299
300 fprintf (file, " delta ");
301 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
302 fprintf (file, "\n");
303
304 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
305
306 fprintf (file, "\n");
307 }
308
309 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
310 exist. */
311
312 static struct mem_ref_group *
313 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
314 {
315 struct mem_ref_group *group;
316
317 for (; *groups; groups = &(*groups)->next)
318 {
319 if (operand_equal_p ((*groups)->step, step, 0)
320 && operand_equal_p ((*groups)->base, base, 0))
321 return *groups;
322
323 /* If step is an integer constant, keep the list of groups sorted
324 by decreasing step. */
325 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
326 && int_cst_value ((*groups)->step) < int_cst_value (step))
327 break;
328 }
329
330 group = XNEW (struct mem_ref_group);
331 group->base = base;
332 group->step = step;
333 group->refs = NULL;
334 group->next = *groups;
335 *groups = group;
336
337 return group;
338 }
339
340 /* Records a memory reference MEM in GROUP with offset DELTA and write status
341 WRITE_P. The reference occurs in statement STMT. */
342
343 static void
344 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
345 HOST_WIDE_INT delta, bool write_p)
346 {
347 struct mem_ref **aref;
348
349 /* Do not record the same address twice. */
350 for (aref = &group->refs; *aref; aref = &(*aref)->next)
351 {
352 /* It does not have to be possible for write reference to reuse the read
353 prefetch, or vice versa. */
354 if (!WRITE_CAN_USE_READ_PREFETCH
355 && write_p
356 && !(*aref)->write_p)
357 continue;
358 if (!READ_CAN_USE_WRITE_PREFETCH
359 && !write_p
360 && (*aref)->write_p)
361 continue;
362
363 if ((*aref)->delta == delta)
364 return;
365 }
366
367 (*aref) = XNEW (struct mem_ref);
368 (*aref)->stmt = stmt;
369 (*aref)->mem = mem;
370 (*aref)->delta = delta;
371 (*aref)->write_p = write_p;
372 (*aref)->prefetch_before = PREFETCH_ALL;
373 (*aref)->prefetch_mod = 1;
374 (*aref)->reuse_distance = 0;
375 (*aref)->issue_prefetch_p = false;
376 (*aref)->group = group;
377 (*aref)->next = NULL;
378 (*aref)->independent_p = false;
379 (*aref)->storent_p = false;
380
381 if (dump_file && (dump_flags & TDF_DETAILS))
382 dump_mem_ref (dump_file, *aref);
383 }
384
385 /* Release memory references in GROUPS. */
386
387 static void
388 release_mem_refs (struct mem_ref_group *groups)
389 {
390 struct mem_ref_group *next_g;
391 struct mem_ref *ref, *next_r;
392
393 for (; groups; groups = next_g)
394 {
395 next_g = groups->next;
396 for (ref = groups->refs; ref; ref = next_r)
397 {
398 next_r = ref->next;
399 free (ref);
400 }
401 free (groups);
402 }
403 }
404
405 /* A structure used to pass arguments to idx_analyze_ref. */
406
407 struct ar_data
408 {
409 struct loop *loop; /* Loop of the reference. */
410 gimple stmt; /* Statement of the reference. */
411 tree *step; /* Step of the memory reference. */
412 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
413 };
414
415 /* Analyzes a single INDEX of a memory reference to obtain information
416 described at analyze_ref. Callback for for_each_index. */
417
418 static bool
419 idx_analyze_ref (tree base, tree *index, void *data)
420 {
421 struct ar_data *ar_data = (struct ar_data *) data;
422 tree ibase, step, stepsize;
423 HOST_WIDE_INT idelta = 0, imult = 1;
424 affine_iv iv;
425
426 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
427 return false;
428
429 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
430 *index, &iv, true))
431 return false;
432 ibase = iv.base;
433 step = iv.step;
434
435 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
436 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
437 {
438 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
439 ibase = TREE_OPERAND (ibase, 0);
440 }
441 if (cst_and_fits_in_hwi (ibase))
442 {
443 idelta += int_cst_value (ibase);
444 ibase = build_int_cst (TREE_TYPE (ibase), 0);
445 }
446
447 if (TREE_CODE (base) == ARRAY_REF)
448 {
449 stepsize = array_ref_element_size (base);
450 if (!cst_and_fits_in_hwi (stepsize))
451 return false;
452 imult = int_cst_value (stepsize);
453 step = fold_build2 (MULT_EXPR, sizetype,
454 fold_convert (sizetype, step),
455 fold_convert (sizetype, stepsize));
456 idelta *= imult;
457 }
458
459 if (*ar_data->step == NULL_TREE)
460 *ar_data->step = step;
461 else
462 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
463 fold_convert (sizetype, *ar_data->step),
464 fold_convert (sizetype, step));
465 *ar_data->delta += idelta;
466 *index = ibase;
467
468 return true;
469 }
470
471 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
472 STEP are integer constants and iter is number of iterations of LOOP. The
473 reference occurs in statement STMT. Strips nonaddressable component
474 references from REF_P. */
475
476 static bool
477 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
478 tree *step, HOST_WIDE_INT *delta,
479 gimple stmt)
480 {
481 struct ar_data ar_data;
482 tree off;
483 HOST_WIDE_INT bit_offset;
484 tree ref = *ref_p;
485
486 *step = NULL_TREE;
487 *delta = 0;
488
489 /* First strip off the component references. Ignore bitfields.
490 Also strip off the real and imagine parts of a complex, so that
491 they can have the same base. */
492 if (TREE_CODE (ref) == REALPART_EXPR
493 || TREE_CODE (ref) == IMAGPART_EXPR
494 || (TREE_CODE (ref) == COMPONENT_REF
495 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
496 {
497 if (TREE_CODE (ref) == IMAGPART_EXPR)
498 *delta += int_size_in_bytes (TREE_TYPE (ref));
499 ref = TREE_OPERAND (ref, 0);
500 }
501
502 *ref_p = ref;
503
504 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
505 {
506 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
507 bit_offset = TREE_INT_CST_LOW (off);
508 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
509
510 *delta += bit_offset / BITS_PER_UNIT;
511 }
512
513 *base = unshare_expr (ref);
514 ar_data.loop = loop;
515 ar_data.stmt = stmt;
516 ar_data.step = step;
517 ar_data.delta = delta;
518 return for_each_index (base, idx_analyze_ref, &ar_data);
519 }
520
521 /* Record a memory reference REF to the list REFS. The reference occurs in
522 LOOP in statement STMT and it is write if WRITE_P. Returns true if the
523 reference was recorded, false otherwise. */
524
525 static bool
526 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
527 tree ref, bool write_p, gimple stmt)
528 {
529 tree base, step;
530 HOST_WIDE_INT delta;
531 struct mem_ref_group *agrp;
532
533 if (get_base_address (ref) == NULL)
534 return false;
535
536 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
537 return false;
538 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
539 if (step == NULL_TREE)
540 return false;
541
542 /* Stop if the address of BASE could not be taken. */
543 if (may_be_nonaddressable_p (base))
544 return false;
545
546 /* Limit non-constant step prefetching only to the innermost loops. */
547 if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
548 return false;
549
550 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
551 are integer constants. */
552 agrp = find_or_create_group (refs, base, step);
553 record_ref (agrp, stmt, ref, delta, write_p);
554
555 return true;
556 }
557
558 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
559 true if there are no other memory references inside the loop. */
560
561 static struct mem_ref_group *
562 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
563 {
564 basic_block *body = get_loop_body_in_dom_order (loop);
565 basic_block bb;
566 unsigned i;
567 gimple_stmt_iterator bsi;
568 gimple stmt;
569 tree lhs, rhs;
570 struct mem_ref_group *refs = NULL;
571
572 *no_other_refs = true;
573 *ref_count = 0;
574
575 /* Scan the loop body in order, so that the former references precede the
576 later ones. */
577 for (i = 0; i < loop->num_nodes; i++)
578 {
579 bb = body[i];
580 if (bb->loop_father != loop)
581 continue;
582
583 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
584 {
585 stmt = gsi_stmt (bsi);
586
587 if (gimple_code (stmt) != GIMPLE_ASSIGN)
588 {
589 if (gimple_vuse (stmt)
590 || (is_gimple_call (stmt)
591 && !(gimple_call_flags (stmt) & ECF_CONST)))
592 *no_other_refs = false;
593 continue;
594 }
595
596 lhs = gimple_assign_lhs (stmt);
597 rhs = gimple_assign_rhs1 (stmt);
598
599 if (REFERENCE_CLASS_P (rhs))
600 {
601 *no_other_refs &= gather_memory_references_ref (loop, &refs,
602 rhs, false, stmt);
603 *ref_count += 1;
604 }
605 if (REFERENCE_CLASS_P (lhs))
606 {
607 *no_other_refs &= gather_memory_references_ref (loop, &refs,
608 lhs, true, stmt);
609 *ref_count += 1;
610 }
611 }
612 }
613 free (body);
614
615 return refs;
616 }
617
618 /* Prune the prefetch candidate REF using the self-reuse. */
619
620 static void
621 prune_ref_by_self_reuse (struct mem_ref *ref)
622 {
623 HOST_WIDE_INT step;
624 bool backward;
625
626 /* If the step size is non constant, we cannot calculate prefetch_mod. */
627 if (!cst_and_fits_in_hwi (ref->group->step))
628 return;
629
630 step = int_cst_value (ref->group->step);
631
632 backward = step < 0;
633
634 if (step == 0)
635 {
636 /* Prefetch references to invariant address just once. */
637 ref->prefetch_before = 1;
638 return;
639 }
640
641 if (backward)
642 step = -step;
643
644 if (step > PREFETCH_BLOCK)
645 return;
646
647 if ((backward && HAVE_BACKWARD_PREFETCH)
648 || (!backward && HAVE_FORWARD_PREFETCH))
649 {
650 ref->prefetch_before = 1;
651 return;
652 }
653
654 ref->prefetch_mod = PREFETCH_BLOCK / step;
655 }
656
657 /* Divides X by BY, rounding down. */
658
659 static HOST_WIDE_INT
660 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
661 {
662 gcc_assert (by > 0);
663
664 if (x >= 0)
665 return x / by;
666 else
667 return (x + by - 1) / by;
668 }
669
670 /* Given a CACHE_LINE_SIZE and two inductive memory references
671 with a common STEP greater than CACHE_LINE_SIZE and an address
672 difference DELTA, compute the probability that they will fall
673 in different cache lines. Return true if the computed miss rate
674 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
675 number of distinct iterations after which the pattern repeats itself.
676 ALIGN_UNIT is the unit of alignment in bytes. */
677
678 static bool
679 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
680 HOST_WIDE_INT step, HOST_WIDE_INT delta,
681 unsigned HOST_WIDE_INT distinct_iters,
682 int align_unit)
683 {
684 unsigned align, iter;
685 int total_positions, miss_positions, max_allowed_miss_positions;
686 int address1, address2, cache_line1, cache_line2;
687
688 /* It always misses if delta is greater than or equal to the cache
689 line size. */
690 if (delta >= (HOST_WIDE_INT) cache_line_size)
691 return false;
692
693 miss_positions = 0;
694 total_positions = (cache_line_size / align_unit) * distinct_iters;
695 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
696
697 /* Iterate through all possible alignments of the first
698 memory reference within its cache line. */
699 for (align = 0; align < cache_line_size; align += align_unit)
700
701 /* Iterate through all distinct iterations. */
702 for (iter = 0; iter < distinct_iters; iter++)
703 {
704 address1 = align + step * iter;
705 address2 = address1 + delta;
706 cache_line1 = address1 / cache_line_size;
707 cache_line2 = address2 / cache_line_size;
708 if (cache_line1 != cache_line2)
709 {
710 miss_positions += 1;
711 if (miss_positions > max_allowed_miss_positions)
712 return false;
713 }
714 }
715 return true;
716 }
717
718 /* Prune the prefetch candidate REF using the reuse with BY.
719 If BY_IS_BEFORE is true, BY is before REF in the loop. */
720
721 static void
722 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
723 bool by_is_before)
724 {
725 HOST_WIDE_INT step;
726 bool backward;
727 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
728 HOST_WIDE_INT delta = delta_b - delta_r;
729 HOST_WIDE_INT hit_from;
730 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
731 HOST_WIDE_INT reduced_step;
732 unsigned HOST_WIDE_INT reduced_prefetch_block;
733 tree ref_type;
734 int align_unit;
735
736 /* If the step is non constant we cannot calculate prefetch_before. */
737 if (!cst_and_fits_in_hwi (ref->group->step)) {
738 return;
739 }
740
741 step = int_cst_value (ref->group->step);
742
743 backward = step < 0;
744
745
746 if (delta == 0)
747 {
748 /* If the references has the same address, only prefetch the
749 former. */
750 if (by_is_before)
751 ref->prefetch_before = 0;
752
753 return;
754 }
755
756 if (!step)
757 {
758 /* If the reference addresses are invariant and fall into the
759 same cache line, prefetch just the first one. */
760 if (!by_is_before)
761 return;
762
763 if (ddown (ref->delta, PREFETCH_BLOCK)
764 != ddown (by->delta, PREFETCH_BLOCK))
765 return;
766
767 ref->prefetch_before = 0;
768 return;
769 }
770
771 /* Only prune the reference that is behind in the array. */
772 if (backward)
773 {
774 if (delta > 0)
775 return;
776
777 /* Transform the data so that we may assume that the accesses
778 are forward. */
779 delta = - delta;
780 step = -step;
781 delta_r = PREFETCH_BLOCK - 1 - delta_r;
782 delta_b = PREFETCH_BLOCK - 1 - delta_b;
783 }
784 else
785 {
786 if (delta < 0)
787 return;
788 }
789
790 /* Check whether the two references are likely to hit the same cache
791 line, and how distant the iterations in that it occurs are from
792 each other. */
793
794 if (step <= PREFETCH_BLOCK)
795 {
796 /* The accesses are sure to meet. Let us check when. */
797 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
798 prefetch_before = (hit_from - delta_r + step - 1) / step;
799
800 /* Do not reduce prefetch_before if we meet beyond cache size. */
801 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
802 prefetch_before = PREFETCH_ALL;
803 if (prefetch_before < ref->prefetch_before)
804 ref->prefetch_before = prefetch_before;
805
806 return;
807 }
808
809 /* A more complicated case with step > prefetch_block. First reduce
810 the ratio between the step and the cache line size to its simplest
811 terms. The resulting denominator will then represent the number of
812 distinct iterations after which each address will go back to its
813 initial location within the cache line. This computation assumes
814 that PREFETCH_BLOCK is a power of two. */
815 prefetch_block = PREFETCH_BLOCK;
816 reduced_prefetch_block = prefetch_block;
817 reduced_step = step;
818 while ((reduced_step & 1) == 0
819 && reduced_prefetch_block > 1)
820 {
821 reduced_step >>= 1;
822 reduced_prefetch_block >>= 1;
823 }
824
825 prefetch_before = delta / step;
826 delta %= step;
827 ref_type = TREE_TYPE (ref->mem);
828 align_unit = TYPE_ALIGN (ref_type) / 8;
829 if (is_miss_rate_acceptable (prefetch_block, step, delta,
830 reduced_prefetch_block, align_unit))
831 {
832 /* Do not reduce prefetch_before if we meet beyond cache size. */
833 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
834 prefetch_before = PREFETCH_ALL;
835 if (prefetch_before < ref->prefetch_before)
836 ref->prefetch_before = prefetch_before;
837
838 return;
839 }
840
841 /* Try also the following iteration. */
842 prefetch_before++;
843 delta = step - delta;
844 if (is_miss_rate_acceptable (prefetch_block, step, delta,
845 reduced_prefetch_block, align_unit))
846 {
847 if (prefetch_before < ref->prefetch_before)
848 ref->prefetch_before = prefetch_before;
849
850 return;
851 }
852
853 /* The ref probably does not reuse by. */
854 return;
855 }
856
857 /* Prune the prefetch candidate REF using the reuses with other references
858 in REFS. */
859
860 static void
861 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
862 {
863 struct mem_ref *prune_by;
864 bool before = true;
865
866 prune_ref_by_self_reuse (ref);
867
868 for (prune_by = refs; prune_by; prune_by = prune_by->next)
869 {
870 if (prune_by == ref)
871 {
872 before = false;
873 continue;
874 }
875
876 if (!WRITE_CAN_USE_READ_PREFETCH
877 && ref->write_p
878 && !prune_by->write_p)
879 continue;
880 if (!READ_CAN_USE_WRITE_PREFETCH
881 && !ref->write_p
882 && prune_by->write_p)
883 continue;
884
885 prune_ref_by_group_reuse (ref, prune_by, before);
886 }
887 }
888
889 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
890
891 static void
892 prune_group_by_reuse (struct mem_ref_group *group)
893 {
894 struct mem_ref *ref_pruned;
895
896 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
897 {
898 prune_ref_by_reuse (ref_pruned, group->refs);
899
900 if (dump_file && (dump_flags & TDF_DETAILS))
901 {
902 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
903
904 if (ref_pruned->prefetch_before == PREFETCH_ALL
905 && ref_pruned->prefetch_mod == 1)
906 fprintf (dump_file, " no restrictions");
907 else if (ref_pruned->prefetch_before == 0)
908 fprintf (dump_file, " do not prefetch");
909 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
910 fprintf (dump_file, " prefetch once");
911 else
912 {
913 if (ref_pruned->prefetch_before != PREFETCH_ALL)
914 {
915 fprintf (dump_file, " prefetch before ");
916 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
917 ref_pruned->prefetch_before);
918 }
919 if (ref_pruned->prefetch_mod != 1)
920 {
921 fprintf (dump_file, " prefetch mod ");
922 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
923 ref_pruned->prefetch_mod);
924 }
925 }
926 fprintf (dump_file, "\n");
927 }
928 }
929 }
930
931 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
932
933 static void
934 prune_by_reuse (struct mem_ref_group *groups)
935 {
936 for (; groups; groups = groups->next)
937 prune_group_by_reuse (groups);
938 }
939
940 /* Returns true if we should issue prefetch for REF. */
941
942 static bool
943 should_issue_prefetch_p (struct mem_ref *ref)
944 {
945 /* For now do not issue prefetches for only first few of the
946 iterations. */
947 if (ref->prefetch_before != PREFETCH_ALL)
948 {
949 if (dump_file && (dump_flags & TDF_DETAILS))
950 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
951 (void *) ref);
952 return false;
953 }
954
955 /* Do not prefetch nontemporal stores. */
956 if (ref->storent_p)
957 {
958 if (dump_file && (dump_flags & TDF_DETAILS))
959 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
960 return false;
961 }
962
963 return true;
964 }
965
966 /* Decide which of the prefetch candidates in GROUPS to prefetch.
967 AHEAD is the number of iterations to prefetch ahead (which corresponds
968 to the number of simultaneous instances of one prefetch running at a
969 time). UNROLL_FACTOR is the factor by that the loop is going to be
970 unrolled. Returns true if there is anything to prefetch. */
971
972 static bool
973 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
974 unsigned ahead)
975 {
976 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
977 unsigned slots_per_prefetch;
978 struct mem_ref *ref;
979 bool any = false;
980
981 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
982 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
983
984 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
985 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
986 it will need a prefetch slot. */
987 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
988 if (dump_file && (dump_flags & TDF_DETAILS))
989 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
990 slots_per_prefetch);
991
992 /* For now we just take memory references one by one and issue
993 prefetches for as many as possible. The groups are sorted
994 starting with the largest step, since the references with
995 large step are more likely to cause many cache misses. */
996
997 for (; groups; groups = groups->next)
998 for (ref = groups->refs; ref; ref = ref->next)
999 {
1000 if (!should_issue_prefetch_p (ref))
1001 continue;
1002
1003 /* The loop is far from being sufficiently unrolled for this
1004 prefetch. Do not generate prefetch to avoid many redudant
1005 prefetches. */
1006 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1007 continue;
1008
1009 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1010 and we unroll the loop UNROLL_FACTOR times, we need to insert
1011 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1012 iteration. */
1013 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1014 / ref->prefetch_mod);
1015 prefetch_slots = n_prefetches * slots_per_prefetch;
1016
1017 /* If more than half of the prefetches would be lost anyway, do not
1018 issue the prefetch. */
1019 if (2 * remaining_prefetch_slots < prefetch_slots)
1020 continue;
1021
1022 ref->issue_prefetch_p = true;
1023
1024 if (remaining_prefetch_slots <= prefetch_slots)
1025 return true;
1026 remaining_prefetch_slots -= prefetch_slots;
1027 any = true;
1028 }
1029
1030 return any;
1031 }
1032
1033 /* Return TRUE if no prefetch is going to be generated in the given
1034 GROUPS. */
1035
1036 static bool
1037 nothing_to_prefetch_p (struct mem_ref_group *groups)
1038 {
1039 struct mem_ref *ref;
1040
1041 for (; groups; groups = groups->next)
1042 for (ref = groups->refs; ref; ref = ref->next)
1043 if (should_issue_prefetch_p (ref))
1044 return false;
1045
1046 return true;
1047 }
1048
1049 /* Estimate the number of prefetches in the given GROUPS.
1050 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1051
1052 static int
1053 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1054 {
1055 struct mem_ref *ref;
1056 unsigned n_prefetches;
1057 int prefetch_count = 0;
1058
1059 for (; groups; groups = groups->next)
1060 for (ref = groups->refs; ref; ref = ref->next)
1061 if (should_issue_prefetch_p (ref))
1062 {
1063 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1064 / ref->prefetch_mod);
1065 prefetch_count += n_prefetches;
1066 }
1067
1068 return prefetch_count;
1069 }
1070
1071 /* Issue prefetches for the reference REF into loop as decided before.
1072 HEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1073 is the factor by which LOOP was unrolled. */
1074
1075 static void
1076 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1077 {
1078 HOST_WIDE_INT delta;
1079 tree addr, addr_base, write_p, local, forward;
1080 gimple prefetch;
1081 gimple_stmt_iterator bsi;
1082 unsigned n_prefetches, ap;
1083 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1084
1085 if (dump_file && (dump_flags & TDF_DETAILS))
1086 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1087 nontemporal ? " nontemporal" : "",
1088 (void *) ref);
1089
1090 bsi = gsi_for_stmt (ref->stmt);
1091
1092 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1093 / ref->prefetch_mod);
1094 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1095 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1096 true, NULL, true, GSI_SAME_STMT);
1097 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1098 local = nontemporal ? integer_zero_node : integer_three_node;
1099
1100 for (ap = 0; ap < n_prefetches; ap++)
1101 {
1102 if (cst_and_fits_in_hwi (ref->group->step))
1103 {
1104 /* Determine the address to prefetch. */
1105 delta = (ahead + ap * ref->prefetch_mod) *
1106 int_cst_value (ref->group->step);
1107 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1108 addr_base, size_int (delta));
1109 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1110 true, GSI_SAME_STMT);
1111 }
1112 else
1113 {
1114 /* The step size is non-constant but loop-invariant. We use the
1115 heuristic to simply prefetch ahead iterations ahead. */
1116 forward = fold_build2 (MULT_EXPR, sizetype,
1117 fold_convert (sizetype, ref->group->step),
1118 fold_convert (sizetype, size_int (ahead)));
1119 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1120 forward);
1121 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1122 NULL, true, GSI_SAME_STMT);
1123 }
1124 /* Create the prefetch instruction. */
1125 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1126 3, addr, write_p, local);
1127 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1128 }
1129 }
1130
1131 /* Issue prefetches for the references in GROUPS into loop as decided before.
1132 HEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1133 factor by that LOOP was unrolled. */
1134
1135 static void
1136 issue_prefetches (struct mem_ref_group *groups,
1137 unsigned unroll_factor, unsigned ahead)
1138 {
1139 struct mem_ref *ref;
1140
1141 for (; groups; groups = groups->next)
1142 for (ref = groups->refs; ref; ref = ref->next)
1143 if (ref->issue_prefetch_p)
1144 issue_prefetch_ref (ref, unroll_factor, ahead);
1145 }
1146
1147 /* Returns true if REF is a memory write for that a nontemporal store insn
1148 can be used. */
1149
1150 static bool
1151 nontemporal_store_p (struct mem_ref *ref)
1152 {
1153 enum machine_mode mode;
1154 enum insn_code code;
1155
1156 /* REF must be a write that is not reused. We require it to be independent
1157 on all other memory references in the loop, as the nontemporal stores may
1158 be reordered with respect to other memory references. */
1159 if (!ref->write_p
1160 || !ref->independent_p
1161 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1162 return false;
1163
1164 /* Check that we have the storent instruction for the mode. */
1165 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1166 if (mode == BLKmode)
1167 return false;
1168
1169 code = optab_handler (storent_optab, mode);
1170 return code != CODE_FOR_nothing;
1171 }
1172
1173 /* If REF is a nontemporal store, we mark the corresponding modify statement
1174 and return true. Otherwise, we return false. */
1175
1176 static bool
1177 mark_nontemporal_store (struct mem_ref *ref)
1178 {
1179 if (!nontemporal_store_p (ref))
1180 return false;
1181
1182 if (dump_file && (dump_flags & TDF_DETAILS))
1183 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1184 (void *) ref);
1185
1186 gimple_assign_set_nontemporal_move (ref->stmt, true);
1187 ref->storent_p = true;
1188
1189 return true;
1190 }
1191
1192 /* Issue a memory fence instruction after LOOP. */
1193
1194 static void
1195 emit_mfence_after_loop (struct loop *loop)
1196 {
1197 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1198 edge exit;
1199 gimple call;
1200 gimple_stmt_iterator bsi;
1201 unsigned i;
1202
1203 FOR_EACH_VEC_ELT (edge, exits, i, exit)
1204 {
1205 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1206
1207 if (!single_pred_p (exit->dest)
1208 /* If possible, we prefer not to insert the fence on other paths
1209 in cfg. */
1210 && !(exit->flags & EDGE_ABNORMAL))
1211 split_loop_exit_edge (exit);
1212 bsi = gsi_after_labels (exit->dest);
1213
1214 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1215 mark_virtual_ops_for_renaming (call);
1216 }
1217
1218 VEC_free (edge, heap, exits);
1219 update_ssa (TODO_update_ssa_only_virtuals);
1220 }
1221
1222 /* Returns true if we can use storent in loop, false otherwise. */
1223
1224 static bool
1225 may_use_storent_in_loop_p (struct loop *loop)
1226 {
1227 bool ret = true;
1228
1229 if (loop->inner != NULL)
1230 return false;
1231
1232 /* If we must issue a mfence insn after using storent, check that there
1233 is a suitable place for it at each of the loop exits. */
1234 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1235 {
1236 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1237 unsigned i;
1238 edge exit;
1239
1240 FOR_EACH_VEC_ELT (edge, exits, i, exit)
1241 if ((exit->flags & EDGE_ABNORMAL)
1242 && exit->dest == EXIT_BLOCK_PTR)
1243 ret = false;
1244
1245 VEC_free (edge, heap, exits);
1246 }
1247
1248 return ret;
1249 }
1250
1251 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1252 references in the loop. */
1253
1254 static void
1255 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1256 {
1257 struct mem_ref *ref;
1258 bool any = false;
1259
1260 if (!may_use_storent_in_loop_p (loop))
1261 return;
1262
1263 for (; groups; groups = groups->next)
1264 for (ref = groups->refs; ref; ref = ref->next)
1265 any |= mark_nontemporal_store (ref);
1266
1267 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1268 emit_mfence_after_loop (loop);
1269 }
1270
1271 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1272 this is the case, fill in DESC by the description of number of
1273 iterations. */
1274
1275 static bool
1276 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1277 unsigned factor)
1278 {
1279 if (!can_unroll_loop_p (loop, factor, desc))
1280 return false;
1281
1282 /* We only consider loops without control flow for unrolling. This is not
1283 a hard restriction -- tree_unroll_loop works with arbitrary loops
1284 as well; but the unrolling/prefetching is usually more profitable for
1285 loops consisting of a single basic block, and we want to limit the
1286 code growth. */
1287 if (loop->num_nodes > 2)
1288 return false;
1289
1290 return true;
1291 }
1292
1293 /* Determine the coefficient by that unroll LOOP, from the information
1294 contained in the list of memory references REFS. Description of
1295 umber of iterations of LOOP is stored to DESC. NINSNS is the number of
1296 insns of the LOOP. EST_NITER is the estimated number of iterations of
1297 the loop, or -1 if no estimate is available. */
1298
1299 static unsigned
1300 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1301 unsigned ninsns, struct tree_niter_desc *desc,
1302 HOST_WIDE_INT est_niter)
1303 {
1304 unsigned upper_bound;
1305 unsigned nfactor, factor, mod_constraint;
1306 struct mem_ref_group *agp;
1307 struct mem_ref *ref;
1308
1309 /* First check whether the loop is not too large to unroll. We ignore
1310 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1311 from unrolling them enough to make exactly one cache line covered by each
1312 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1313 us from unrolling the loops too many times in cases where we only expect
1314 gains from better scheduling and decreasing loop overhead, which is not
1315 the case here. */
1316 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1317
1318 /* If we unrolled the loop more times than it iterates, the unrolled version
1319 of the loop would be never entered. */
1320 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1321 upper_bound = est_niter;
1322
1323 if (upper_bound <= 1)
1324 return 1;
1325
1326 /* Choose the factor so that we may prefetch each cache just once,
1327 but bound the unrolling by UPPER_BOUND. */
1328 factor = 1;
1329 for (agp = refs; agp; agp = agp->next)
1330 for (ref = agp->refs; ref; ref = ref->next)
1331 if (should_issue_prefetch_p (ref))
1332 {
1333 mod_constraint = ref->prefetch_mod;
1334 nfactor = least_common_multiple (mod_constraint, factor);
1335 if (nfactor <= upper_bound)
1336 factor = nfactor;
1337 }
1338
1339 if (!should_unroll_loop_p (loop, desc, factor))
1340 return 1;
1341
1342 return factor;
1343 }
1344
1345 /* Returns the total volume of the memory references REFS, taking into account
1346 reuses in the innermost loop and cache line size. TODO -- we should also
1347 take into account reuses across the iterations of the loops in the loop
1348 nest. */
1349
1350 static unsigned
1351 volume_of_references (struct mem_ref_group *refs)
1352 {
1353 unsigned volume = 0;
1354 struct mem_ref_group *gr;
1355 struct mem_ref *ref;
1356
1357 for (gr = refs; gr; gr = gr->next)
1358 for (ref = gr->refs; ref; ref = ref->next)
1359 {
1360 /* Almost always reuses another value? */
1361 if (ref->prefetch_before != PREFETCH_ALL)
1362 continue;
1363
1364 /* If several iterations access the same cache line, use the size of
1365 the line divided by this number. Otherwise, a cache line is
1366 accessed in each iteration. TODO -- in the latter case, we should
1367 take the size of the reference into account, rounding it up on cache
1368 line size multiple. */
1369 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1370 }
1371 return volume;
1372 }
1373
1374 /* Returns the volume of memory references accessed across VEC iterations of
1375 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1376 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1377
1378 static unsigned
1379 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1380 {
1381 unsigned i;
1382
1383 for (i = 0; i < n; i++)
1384 if (vec[i] != 0)
1385 break;
1386
1387 if (i == n)
1388 return 0;
1389
1390 gcc_assert (vec[i] > 0);
1391
1392 /* We ignore the parts of the distance vector in subloops, since usually
1393 the numbers of iterations are much smaller. */
1394 return loop_sizes[i] * vec[i];
1395 }
1396
1397 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDE
1398 at the position corresponding to the loop of the step. N is the depth
1399 of the considered loop nest, and, LOOP is its innermost loop. */
1400
1401 static void
1402 add_subscript_strides (tree access_fn, unsigned stride,
1403 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1404 {
1405 struct loop *aloop;
1406 tree step;
1407 HOST_WIDE_INT astep;
1408 unsigned min_depth = loop_depth (loop) - n;
1409
1410 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1411 {
1412 aloop = get_chrec_loop (access_fn);
1413 step = CHREC_RIGHT (access_fn);
1414 access_fn = CHREC_LEFT (access_fn);
1415
1416 if ((unsigned) loop_depth (aloop) <= min_depth)
1417 continue;
1418
1419 if (host_integerp (step, 0))
1420 astep = tree_low_cst (step, 0);
1421 else
1422 astep = L1_CACHE_LINE_SIZE;
1423
1424 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1425
1426 }
1427 }
1428
1429 /* Returns the volume of memory references accessed between two consecutive
1430 self-reuses of the reference DR. We consider the subscripts of DR in N
1431 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1432 loops. LOOP is the innermost loop of the current loop nest. */
1433
1434 static unsigned
1435 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1436 struct loop *loop)
1437 {
1438 tree stride, access_fn;
1439 HOST_WIDE_INT *strides, astride;
1440 VEC (tree, heap) *access_fns;
1441 tree ref = DR_REF (dr);
1442 unsigned i, ret = ~0u;
1443
1444 /* In the following example:
1445
1446 for (i = 0; i < N; i++)
1447 for (j = 0; j < N; j++)
1448 use (a[j][i]);
1449 the same cache line is accessed each N steps (except if the change from
1450 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1451 we cannot rely purely on the results of the data dependence analysis.
1452
1453 Instead, we compute the stride of the reference in each loop, and consider
1454 the innermost loop in that the stride is less than cache size. */
1455
1456 strides = XCNEWVEC (HOST_WIDE_INT, n);
1457 access_fns = DR_ACCESS_FNS (dr);
1458
1459 FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
1460 {
1461 /* Keep track of the reference corresponding to the subscript, so that we
1462 know its stride. */
1463 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1464 ref = TREE_OPERAND (ref, 0);
1465
1466 if (TREE_CODE (ref) == ARRAY_REF)
1467 {
1468 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1469 if (host_integerp (stride, 1))
1470 astride = tree_low_cst (stride, 1);
1471 else
1472 astride = L1_CACHE_LINE_SIZE;
1473
1474 ref = TREE_OPERAND (ref, 0);
1475 }
1476 else
1477 astride = 1;
1478
1479 add_subscript_strides (access_fn, astride, strides, n, loop);
1480 }
1481
1482 for (i = n; i-- > 0; )
1483 {
1484 unsigned HOST_WIDE_INT s;
1485
1486 s = strides[i] < 0 ? -strides[i] : strides[i];
1487
1488 if (s < (unsigned) L1_CACHE_LINE_SIZE
1489 && (loop_sizes[i]
1490 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1491 {
1492 ret = loop_sizes[i];
1493 break;
1494 }
1495 }
1496
1497 free (strides);
1498 return ret;
1499 }
1500
1501 /* Determines the distance till the first reuse of each reference in REFS
1502 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1503 memory references in the loop. */
1504
1505 static void
1506 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1507 bool no_other_refs)
1508 {
1509 struct loop *nest, *aloop;
1510 VEC (data_reference_p, heap) *datarefs = NULL;
1511 VEC (ddr_p, heap) *dependences = NULL;
1512 struct mem_ref_group *gr;
1513 struct mem_ref *ref, *refb;
1514 VEC (loop_p, heap) *vloops = NULL;
1515 unsigned *loop_data_size;
1516 unsigned i, j, n;
1517 unsigned volume, dist, adist;
1518 HOST_WIDE_INT vol;
1519 data_reference_p dr;
1520 ddr_p dep;
1521
1522 if (loop->inner)
1523 return;
1524
1525 /* Find the outermost loop of the loop nest of loop (we require that
1526 there are no sibling loops inside the nest). */
1527 nest = loop;
1528 while (1)
1529 {
1530 aloop = loop_outer (nest);
1531
1532 if (aloop == current_loops->tree_root
1533 || aloop->inner->next)
1534 break;
1535
1536 nest = aloop;
1537 }
1538
1539 /* For each loop, determine the amount of data accessed in each iteration.
1540 We use this to estimate whether the reference is evicted from the
1541 cache before its reuse. */
1542 find_loop_nest (nest, &vloops);
1543 n = VEC_length (loop_p, vloops);
1544 loop_data_size = XNEWVEC (unsigned, n);
1545 volume = volume_of_references (refs);
1546 i = n;
1547 while (i-- != 0)
1548 {
1549 loop_data_size[i] = volume;
1550 /* Bound the volume by the L2 cache size, since above this bound,
1551 all dependence distances are equivalent. */
1552 if (volume > L2_CACHE_SIZE_BYTES)
1553 continue;
1554
1555 aloop = VEC_index (loop_p, vloops, i);
1556 vol = estimated_loop_iterations_int (aloop, false);
1557 if (vol < 0)
1558 vol = expected_loop_iterations (aloop);
1559 volume *= vol;
1560 }
1561
1562 /* Prepare the references in the form suitable for data dependence
1563 analysis. We ignore unanalyzable data references (the results
1564 are used just as a heuristics to estimate temporality of the
1565 references, hence we do not need to worry about correctness). */
1566 for (gr = refs; gr; gr = gr->next)
1567 for (ref = gr->refs; ref; ref = ref->next)
1568 {
1569 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1570
1571 if (dr)
1572 {
1573 ref->reuse_distance = volume;
1574 dr->aux = ref;
1575 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1576 }
1577 else
1578 no_other_refs = false;
1579 }
1580
1581 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1582 {
1583 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1584 ref = (struct mem_ref *) dr->aux;
1585 if (ref->reuse_distance > dist)
1586 ref->reuse_distance = dist;
1587
1588 if (no_other_refs)
1589 ref->independent_p = true;
1590 }
1591
1592 compute_all_dependences (datarefs, &dependences, vloops, true);
1593
1594 FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
1595 {
1596 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1597 continue;
1598
1599 ref = (struct mem_ref *) DDR_A (dep)->aux;
1600 refb = (struct mem_ref *) DDR_B (dep)->aux;
1601
1602 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1603 || DDR_NUM_DIST_VECTS (dep) == 0)
1604 {
1605 /* If the dependence cannot be analyzed, assume that there might be
1606 a reuse. */
1607 dist = 0;
1608
1609 ref->independent_p = false;
1610 refb->independent_p = false;
1611 }
1612 else
1613 {
1614 /* The distance vectors are normalized to be always lexicographically
1615 positive, hence we cannot tell just from them whether DDR_A comes
1616 before DDR_B or vice versa. However, it is not important,
1617 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1618 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1619 in cache (and marking it as nontemporal would not affect
1620 anything). */
1621
1622 dist = volume;
1623 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1624 {
1625 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1626 loop_data_size, n);
1627
1628 /* If this is a dependence in the innermost loop (i.e., the
1629 distances in all superloops are zero) and it is not
1630 the trivial self-dependence with distance zero, record that
1631 the references are not completely independent. */
1632 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1633 && (ref != refb
1634 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1635 {
1636 ref->independent_p = false;
1637 refb->independent_p = false;
1638 }
1639
1640 /* Ignore accesses closer than
1641 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1642 so that we use nontemporal prefetches e.g. if single memory
1643 location is accessed several times in a single iteration of
1644 the loop. */
1645 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1646 continue;
1647
1648 if (adist < dist)
1649 dist = adist;
1650 }
1651 }
1652
1653 if (ref->reuse_distance > dist)
1654 ref->reuse_distance = dist;
1655 if (refb->reuse_distance > dist)
1656 refb->reuse_distance = dist;
1657 }
1658
1659 free_dependence_relations (dependences);
1660 free_data_refs (datarefs);
1661 free (loop_data_size);
1662
1663 if (dump_file && (dump_flags & TDF_DETAILS))
1664 {
1665 fprintf (dump_file, "Reuse distances:\n");
1666 for (gr = refs; gr; gr = gr->next)
1667 for (ref = gr->refs; ref; ref = ref->next)
1668 fprintf (dump_file, " ref %p distance %u\n",
1669 (void *) ref, ref->reuse_distance);
1670 }
1671 }
1672
1673 /* Determine whether or not the trip count to ahead ratio is too small based
1674 on prefitablility consideration.
1675 AHEAD: the iteration ahead distance,
1676 EST_NITER: the estimated trip count. */
1677
1678 static bool
1679 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1680 {
1681 /* Assume trip count to ahead ratio is big enough if the trip count could not
1682 be estimated at compile time. */
1683 if (est_niter < 0)
1684 return false;
1685
1686 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1687 {
1688 if (dump_file && (dump_flags & TDF_DETAILS))
1689 fprintf (dump_file,
1690 "Not prefetching -- loop estimated to roll only %d times\n",
1691 (int) est_niter);
1692 return true;
1693 }
1694
1695 return false;
1696 }
1697
1698 /* Determine whether or not the number of memory references in the loop is
1699 reasonable based on the profitablity and compilation time considerations.
1700 NINSNS: estimated number of instructions in the loop,
1701 MEM_REF_COUNT: total number of memory references in the loop. */
1702
1703 static bool
1704 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1705 {
1706 int insn_to_mem_ratio;
1707
1708 if (mem_ref_count == 0)
1709 return false;
1710
1711 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1712 (compute_all_dependences) have high costs based on quadratic complexity.
1713 To avoid huge compilation time, we give up prefetching if mem_ref_count
1714 is too large. */
1715 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1716 return false;
1717
1718 /* Prefetching improves performance by overlapping cache missing
1719 memory accesses with CPU operations. If the loop does not have
1720 enough CPU operations to overlap with memory operations, prefetching
1721 won't give a significant benefit. One approximate way of checking
1722 this is to require the ratio of instructions to memory references to
1723 be above a certain limit. This approximation works well in practice.
1724 TODO: Implement a more precise computation by estimating the time
1725 for each CPU or memory op in the loop. Time estimates for memory ops
1726 should account for cache misses. */
1727 insn_to_mem_ratio = ninsns / mem_ref_count;
1728
1729 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1730 {
1731 if (dump_file && (dump_flags & TDF_DETAILS))
1732 fprintf (dump_file,
1733 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1734 insn_to_mem_ratio);
1735 return false;
1736 }
1737
1738 return true;
1739 }
1740
1741 /* Determine whether or not the instruction to prefetch ratio in the loop is
1742 too small based on the profitablity consideration.
1743 NINSNS: estimated number of instructions in the loop,
1744 PREFETCH_COUNT: an estimate of the number of prefetches,
1745 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1746
1747 static bool
1748 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1749 unsigned unroll_factor)
1750 {
1751 int insn_to_prefetch_ratio;
1752
1753 /* Prefetching most likely causes performance degradation when the instruction
1754 to prefetch ratio is too small. Too many prefetch instructions in a loop
1755 may reduce the I-cache performance.
1756 (unroll_factor * ninsns) is used to estimate the number of instructions in
1757 the unrolled loop. This implementation is a bit simplistic -- the number
1758 of issued prefetch instructions is also affected by unrolling. So,
1759 prefetch_mod and the unroll factor should be taken into account when
1760 determining prefetch_count. Also, the number of insns of the unrolled
1761 loop will usually be significantly smaller than the number of insns of the
1762 original loop * unroll_factor (at least the induction variable increases
1763 and the exit branches will get eliminated), so it might be better to use
1764 tree_estimate_loop_size + estimated_unrolled_size. */
1765 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1766 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1767 {
1768 if (dump_file && (dump_flags & TDF_DETAILS))
1769 fprintf (dump_file,
1770 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1771 insn_to_prefetch_ratio);
1772 return true;
1773 }
1774
1775 return false;
1776 }
1777
1778
1779 /* Issue prefetch instructions for array references in LOOP. Returns
1780 true if the LOOP was unrolled. */
1781
1782 static bool
1783 loop_prefetch_arrays (struct loop *loop)
1784 {
1785 struct mem_ref_group *refs;
1786 unsigned ahead, ninsns, time, unroll_factor;
1787 HOST_WIDE_INT est_niter;
1788 struct tree_niter_desc desc;
1789 bool unrolled = false, no_other_refs;
1790 unsigned prefetch_count;
1791 unsigned mem_ref_count;
1792
1793 if (optimize_loop_nest_for_size_p (loop))
1794 {
1795 if (dump_file && (dump_flags & TDF_DETAILS))
1796 fprintf (dump_file, " ignored (cold area)\n");
1797 return false;
1798 }
1799
1800 /* FIXME: the time should be weighted by the probabilities of the blocks in
1801 the loop body. */
1802 time = tree_num_loop_insns (loop, &eni_time_weights);
1803 if (time == 0)
1804 return false;
1805
1806 ahead = (PREFETCH_LATENCY + time - 1) / time;
1807 est_niter = estimated_loop_iterations_int (loop, false);
1808
1809 /* Prefetching is not likely to be profitable if the trip count to ahead
1810 ratio is too small. */
1811 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1812 return false;
1813
1814 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1815
1816 /* Step 1: gather the memory references. */
1817 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1818
1819 /* Give up prefetching if the number of memory references in the
1820 loop is not reasonable based on profitablity and compilation time
1821 considerations. */
1822 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1823 goto fail;
1824
1825 /* Step 2: estimate the reuse effects. */
1826 prune_by_reuse (refs);
1827
1828 if (nothing_to_prefetch_p (refs))
1829 goto fail;
1830
1831 determine_loop_nest_reuse (loop, refs, no_other_refs);
1832
1833 /* Step 3: determine unroll factor. */
1834 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1835 est_niter);
1836
1837 /* Estimate prefetch count for the unrolled loop. */
1838 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1839 if (prefetch_count == 0)
1840 goto fail;
1841
1842 if (dump_file && (dump_flags & TDF_DETAILS))
1843 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1844 HOST_WIDE_INT_PRINT_DEC "\n"
1845 "insn count %d, mem ref count %d, prefetch count %d\n",
1846 ahead, unroll_factor, est_niter,
1847 ninsns, mem_ref_count, prefetch_count);
1848
1849 /* Prefetching is not likely to be profitable if the instruction to prefetch
1850 ratio is too small. */
1851 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1852 unroll_factor))
1853 goto fail;
1854
1855 mark_nontemporal_stores (loop, refs);
1856
1857 /* Step 4: what to prefetch? */
1858 if (!schedule_prefetches (refs, unroll_factor, ahead))
1859 goto fail;
1860
1861 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1862 iterations so that we do not issue superfluous prefetches. */
1863 if (unroll_factor != 1)
1864 {
1865 tree_unroll_loop (loop, unroll_factor,
1866 single_dom_exit (loop), &desc);
1867 unrolled = true;
1868 }
1869
1870 /* Step 6: issue the prefetches. */
1871 issue_prefetches (refs, unroll_factor, ahead);
1872
1873 fail:
1874 release_mem_refs (refs);
1875 return unrolled;
1876 }
1877
1878 /* Issue prefetch instructions for array references in loops. */
1879
1880 unsigned int
1881 tree_ssa_prefetch_arrays (void)
1882 {
1883 loop_iterator li;
1884 struct loop *loop;
1885 bool unrolled = false;
1886 int todo_flags = 0;
1887
1888 if (!HAVE_prefetch
1889 /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
1890 -mtune=i486 causes us having PREFETCH_BLOCK 0, since this is part
1891 of processor costs and i486 does not have prefetch, but
1892 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1893 || PREFETCH_BLOCK == 0)
1894 return 0;
1895
1896 if (dump_file && (dump_flags & TDF_DETAILS))
1897 {
1898 fprintf (dump_file, "Prefetching parameters:\n");
1899 fprintf (dump_file, " simultaneous prefetches: %d\n",
1900 SIMULTANEOUS_PREFETCHES);
1901 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1902 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1903 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1904 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1905 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1906 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1907 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1908 MIN_INSN_TO_PREFETCH_RATIO);
1909 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1910 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1911 fprintf (dump_file, "\n");
1912 }
1913
1914 initialize_original_copy_tables ();
1915
1916 if (!built_in_decls[BUILT_IN_PREFETCH])
1917 {
1918 tree type = build_function_type_list (void_type_node,
1919 const_ptr_type_node, NULL_TREE);
1920 tree decl = add_builtin_function ("__builtin_prefetch", type,
1921 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1922 NULL, NULL_TREE);
1923 DECL_IS_NOVOPS (decl) = true;
1924 built_in_decls[BUILT_IN_PREFETCH] = decl;
1925 }
1926
1927 /* We assume that size of cache line is a power of two, so verify this
1928 here. */
1929 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1930
1931 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1932 {
1933 if (dump_file && (dump_flags & TDF_DETAILS))
1934 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1935
1936 unrolled |= loop_prefetch_arrays (loop);
1937
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1939 fprintf (dump_file, "\n\n");
1940 }
1941
1942 if (unrolled)
1943 {
1944 scev_reset ();
1945 todo_flags |= TODO_cleanup_cfg;
1946 }
1947
1948 free_original_copy_tables ();
1949 return todo_flags;
1950 }