1 /* Array prefetching.
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "stor-layout.h"
26 #include "tm_p.h"
27 #include "basic-block.h"
28 #include "tree-pretty-print.h"
29 #include "tree-ssa-alias.h"
30 #include "internal-fn.h"
31 #include "gimple-expr.h"
32 #include "is-a.h"
33 #include "gimple.h"
34 #include "gimplify.h"
35 #include "gimple-iterator.h"
36 #include "gimplify-me.h"
37 #include "gimple-ssa.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-pass.h"
45 #include "insn-config.h"
46 #include "hashtab.h"
47 #include "tree-chrec.h"
48 #include "tree-scalar-evolution.h"
49 #include "diagnostic-core.h"
50 #include "params.h"
51 #include "langhooks.h"
52 #include "tree-inline.h"
53 #include "tree-data-ref.h"
54
55
56 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
57 between the GIMPLE and RTL worlds. */
58 #include "expr.h"
59 #include "optabs.h"
60 #include "recog.h"
61
62 /* This pass inserts prefetch instructions to optimize cache usage during
63 accesses to arrays in loops. It processes loops sequentially and:
64
65 1) Gathers all memory references in the single loop.
66 2) For each of the references it decides when it is profitable to prefetch
67 it. To do it, we evaluate the reuse among the accesses, and determine
68 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
69 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
70 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
71 iterations of the loop that are zero modulo PREFETCH_MOD). For example
72 (assuming cache line size is 64 bytes, char has size 1 byte and there
73 is no hardware sequential prefetch):
74
75 char *a;
76 for (i = 0; i < max; i++)
77 {
78 a[255] = ...; (0)
79 a[i] = ...; (1)
80 a[i + 64] = ...; (2)
81 a[16*i] = ...; (3)
82 a[187*i] = ...; (4)
83 a[187*i + 50] = ...; (5)
84 }
85
86 (0) obviously has PREFETCH_BEFORE 1
87 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
88 location 64 iterations before it, and PREFETCH_MOD 64 (since
89 it hits the same cache line otherwise).
90 (2) has PREFETCH_MOD 64
91 (3) has PREFETCH_MOD 4
92 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
93 the cache line accessed by (5) is the same with probability only
94 7/32.
95 (5) has PREFETCH_MOD 1 as well.
96
97 Additionally, we use data dependence analysis to determine for each
98 reference the distance till the first reuse; this information is used
99 to determine the temporality of the issued prefetch instruction.
100
101 3) We determine how much ahead we need to prefetch. The number of
102 iterations needed is time to fetch / time spent in one iteration of
103 the loop. The problem is that we do not know either of these values,
104 so we just make a heuristic guess based on a magic (possibly)
105 target-specific constant and size of the loop.
106
107 4) Determine which of the references we prefetch. We take into account
108 that there is a maximum number of simultaneous prefetches (provided
109 by the machine description). We issue as many prefetches as possible
110 while still staying within this bound (starting with those with the lowest
111 prefetch_mod, since they are responsible for most of the cache
112 misses).
113
114 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
115 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
116 prefetching nonaccessed memory.
117 TODO -- actually implement peeling.
118
119 6) We actually emit the prefetch instructions. ??? Perhaps emit the
120 prefetch instructions with guards in cases where 5) was not sufficient
121 to satisfy the constraints?
122
123 A cost model is implemented to determine whether or not prefetching is
124 profitable for a given loop. The cost model has three heuristics:
125
126 1. Function trip_count_to_ahead_ratio_too_small_p implements a
127 heuristic that determines whether or not the loop has too few
128 iterations (compared to ahead). Prefetching is not likely to be
129 beneficial if the trip count to ahead ratio is below a certain
130 minimum.
131
132 2. Function mem_ref_count_reasonable_p implements a heuristic that
133 determines whether the given loop has enough CPU ops that can be
134 overlapped with cache missing memory ops. If not, the loop
135 won't benefit from prefetching. In the implementation,
136 prefetching is not considered beneficial if the ratio between
137 the instruction count and the mem ref count is below a certain
138 minimum.
139
140 3. Function insn_to_prefetch_ratio_too_small_p implements a
141 heuristic that disables prefetching in a loop if the prefetching
142 cost is above a certain limit. The relative prefetching cost is
143 estimated by taking the ratio between the prefetch count and the
144 total instruction count (this models the I-cache cost).
145
146 The limits used in these heuristics are defined as parameters with
147 reasonable default values. Machine-specific default values will be
148 added later.
149
150 Some other TODO:
151 -- write and use more general reuse analysis (that could be also used
152 in other cache aimed loop optimizations)
153 -- make it behave sanely together with the prefetches given by the user
154 (now we just ignore them; at the very least we should avoid
155 optimizing loops in which the user put their own prefetches)
156 -- we assume cache line size alignment of arrays; this could be
157 improved. */
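
/* As a rough sketch of the transformation this pass performs (the
   prefetch distance, unroll factor and locality hints below are merely
   assumed for illustration; the real values come from the heuristics
   described above):

     double a[N], b[N];
     for (i = 0; i < n; i++)
       a[i] = a[i] + b[i];

   With 64-byte cache lines (8 doubles per line), an unroll factor of 8
   and a prefetch distance of 64 iterations, this becomes roughly

     for (i = 0; i < n; i += 8)
       {
         __builtin_prefetch (&a[i + 64], 1, 3);    <- write prefetch
         __builtin_prefetch (&b[i + 64], 0, 3);    <- read prefetch
         a[i] = a[i] + b[i];
         ... seven more copies of the body ...
       }

   so that each cache line is prefetched exactly once, sufficiently far
   ahead of its use.  */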
158
159 /* Magic constants follow. These should be replaced by machine specific
160 numbers. */
161
162 /* True if write can be prefetched by a read prefetch. */
163
164 #ifndef WRITE_CAN_USE_READ_PREFETCH
165 #define WRITE_CAN_USE_READ_PREFETCH 1
166 #endif
167
168 /* True if read can be prefetched by a write prefetch. */
169
170 #ifndef READ_CAN_USE_WRITE_PREFETCH
171 #define READ_CAN_USE_WRITE_PREFETCH 0
172 #endif
173
174 /* The size of the block loaded by a single prefetch. Usually, this is
175 the same as cache line size (at the moment, we only consider one level
176 of cache hierarchy). */
177
178 #ifndef PREFETCH_BLOCK
179 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
180 #endif
181
182 /* Do we have forward hardware sequential prefetching? */
183
184 #ifndef HAVE_FORWARD_PREFETCH
185 #define HAVE_FORWARD_PREFETCH 0
186 #endif
187
188 /* Do we have backward hardware sequential prefetching? */
189
190 #ifndef HAVE_BACKWARD_PREFETCH
191 #define HAVE_BACKWARD_PREFETCH 0
192 #endif
193
194 /* In some cases we are only able to determine that there is a certain
195 probability that the two accesses hit the same cache line. In this
196 case, we issue the prefetches for both of them if this probability
197 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
198
199 #ifndef ACCEPTABLE_MISS_RATE
200 #define ACCEPTABLE_MISS_RATE 50
201 #endif
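
/* With the default value of 50, the tolerated miss rate is 5%.  In the
   overview example above, references (4) and (5) differ by 50 bytes;
   with 64-byte cache lines they fall into the same line with
   probability only (64 - 50) / 64 = 7/32, i.e. a miss rate of roughly
   78%, far above the threshold, so prefetches are issued for both.  */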
202
203 #ifndef HAVE_prefetch
204 #define HAVE_prefetch 0
205 #endif
206
207 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
208 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
209
210 /* We consider a memory access nontemporal if it is not reused sooner than
211 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
212 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
213 so that we can still use nontemporal prefetches e.g. even if a single
214 memory location is accessed several times in a single iteration of the loop. */
215 #define NONTEMPORAL_FRACTION 16
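
/* For instance, with a 32 kB L1 and a 512 kB L2 cache (illustrative
   values only; the real ones come from the l1-cache-size and
   l2-cache-size parameters), a reference is treated as nontemporal if
   at least 512 kB of data is accessed before its next reuse, while
   reuses within 32 kB / 16 = 2 kB -- e.g. several accesses to the same
   location within one iteration -- are disregarded.  */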
216
217 /* In case we have to emit a memory fence instruction after the loop that
218 uses nontemporal stores, this defines the builtin to use. */
219
220 #ifndef FENCE_FOLLOWING_MOVNT
221 #define FENCE_FOLLOWING_MOVNT NULL_TREE
222 #endif
223
224 /* It is not profitable to prefetch when the trip count is not at
225 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
226 For example, in a loop with a prefetch ahead distance of 10,
227 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
228 profitable to prefetch when the trip count is greater than or equal to
229 40. In that case, 30 out of the 40 iterations will benefit from
230 prefetching. */
231
232 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
233 #define TRIP_COUNT_TO_AHEAD_RATIO 4
234 #endif
235
236 /* A group of references between which reuse may occur. */
237
238 struct mem_ref_group
239 {
240 tree base; /* Base of the reference. */
241 tree step; /* Step of the reference. */
242 struct mem_ref *refs; /* References in the group. */
243 struct mem_ref_group *next; /* Next group of references. */
244 };
245
246 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
247
248 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
249
250 /* Do not generate a prefetch if the unroll factor is significantly less
251 than what is required by the prefetch. This is to avoid redundant
252 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
253 2, prefetching requires unrolling the loop 16 times, but
254 the loop is actually unrolled twice. In this case (ratio = 8),
255 prefetching is not likely to be beneficial. */
256
257 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
258 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
259 #endif
260
261 /* Some of the prefetch computations have quadratic complexity. We want to
262 avoid huge compile times and, therefore, want to limit the amount of
263 memory references per loop where we consider prefetching. */
264
265 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
266 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
267 #endif
268
269 /* The memory reference. */
270
271 struct mem_ref
272 {
273 gimple stmt; /* Statement in which the reference appears. */
274 tree mem; /* The reference. */
275 HOST_WIDE_INT delta; /* Constant offset of the reference. */
276 struct mem_ref_group *group; /* The group of references it belongs to. */
277 unsigned HOST_WIDE_INT prefetch_mod;
278 /* Prefetch only each PREFETCH_MOD-th
279 iteration. */
280 unsigned HOST_WIDE_INT prefetch_before;
281 /* Prefetch only first PREFETCH_BEFORE
282 iterations. */
283 unsigned reuse_distance; /* The amount of data accessed before the first
284 reuse of this value. */
285 struct mem_ref *next; /* The next reference in the group. */
286 unsigned write_p : 1; /* Is it a write? */
287 unsigned independent_p : 1; /* True if the reference is independent of
288 all other references inside the loop. */
289 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
290 unsigned storent_p : 1; /* True if we changed the store to a
291 nontemporal one. */
292 };
293
294 /* Dumps information about a memory reference with BASE, STEP, offset DELTA and write status WRITE_P to FILE. */
295 static void
296 dump_mem_details (FILE *file, tree base, tree step,
297 HOST_WIDE_INT delta, bool write_p)
298 {
299 fprintf (file, "(base ");
300 print_generic_expr (file, base, TDF_SLIM);
301 fprintf (file, ", step ");
302 if (cst_and_fits_in_hwi (step))
303 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
304 else
305 print_generic_expr (file, step, TDF_TREE);
306 fprintf (file, ")\n");
307 fprintf (file, " delta ");
308 fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
309 fprintf (file, "\n");
310 fprintf (file, " %s\n", write_p ? "write" : "read");
311 fprintf (file, "\n");
312 }
313
314 /* Dumps information about reference REF to FILE. */
315
316 static void
317 dump_mem_ref (FILE *file, struct mem_ref *ref)
318 {
319 fprintf (file, "Reference %p:\n", (void *) ref);
320
321 fprintf (file, " group %p ", (void *) ref->group);
322
323 dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
324 ref->write_p);
325 }
326
327 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
328 exist. */
329
330 static struct mem_ref_group *
331 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
332 {
333 struct mem_ref_group *group;
334
335 for (; *groups; groups = &(*groups)->next)
336 {
337 if (operand_equal_p ((*groups)->step, step, 0)
338 && operand_equal_p ((*groups)->base, base, 0))
339 return *groups;
340
341 /* If step is an integer constant, keep the list of groups sorted
342 by decreasing step. */
343 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
344 && int_cst_value ((*groups)->step) < int_cst_value (step))
345 break;
346 }
347
348 group = XNEW (struct mem_ref_group);
349 group->base = base;
350 group->step = step;
351 group->refs = NULL;
352 group->next = *groups;
353 *groups = group;
354
355 return group;
356 }
357
358 /* Records a memory reference MEM in GROUP with offset DELTA and write status
359 WRITE_P. The reference occurs in statement STMT. */
360
361 static void
362 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
363 HOST_WIDE_INT delta, bool write_p)
364 {
365 struct mem_ref **aref;
366
367 /* Do not record the same address twice. */
368 for (aref = &group->refs; *aref; aref = &(*aref)->next)
369 {
370 /* A write reference may not be able to reuse a read prefetch, or
371 vice versa. */
372 if (!WRITE_CAN_USE_READ_PREFETCH
373 && write_p
374 && !(*aref)->write_p)
375 continue;
376 if (!READ_CAN_USE_WRITE_PREFETCH
377 && !write_p
378 && (*aref)->write_p)
379 continue;
380
381 if ((*aref)->delta == delta)
382 return;
383 }
384
385 (*aref) = XNEW (struct mem_ref);
386 (*aref)->stmt = stmt;
387 (*aref)->mem = mem;
388 (*aref)->delta = delta;
389 (*aref)->write_p = write_p;
390 (*aref)->prefetch_before = PREFETCH_ALL;
391 (*aref)->prefetch_mod = 1;
392 (*aref)->reuse_distance = 0;
393 (*aref)->issue_prefetch_p = false;
394 (*aref)->group = group;
395 (*aref)->next = NULL;
396 (*aref)->independent_p = false;
397 (*aref)->storent_p = false;
398
399 if (dump_file && (dump_flags & TDF_DETAILS))
400 dump_mem_ref (dump_file, *aref);
401 }
402
403 /* Release memory references in GROUPS. */
404
405 static void
406 release_mem_refs (struct mem_ref_group *groups)
407 {
408 struct mem_ref_group *next_g;
409 struct mem_ref *ref, *next_r;
410
411 for (; groups; groups = next_g)
412 {
413 next_g = groups->next;
414 for (ref = groups->refs; ref; ref = next_r)
415 {
416 next_r = ref->next;
417 free (ref);
418 }
419 free (groups);
420 }
421 }
422
423 /* A structure used to pass arguments to idx_analyze_ref. */
424
425 struct ar_data
426 {
427 struct loop *loop; /* Loop of the reference. */
428 gimple stmt; /* Statement of the reference. */
429 tree *step; /* Step of the memory reference. */
430 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
431 };
432
433 /* Analyzes a single INDEX of a memory reference to obtain information
434 described in analyze_ref. Callback for for_each_index. */
435
436 static bool
437 idx_analyze_ref (tree base, tree *index, void *data)
438 {
439 struct ar_data *ar_data = (struct ar_data *) data;
440 tree ibase, step, stepsize;
441 HOST_WIDE_INT idelta = 0, imult = 1;
442 affine_iv iv;
443
444 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
445 *index, &iv, true))
446 return false;
447 ibase = iv.base;
448 step = iv.step;
449
450 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
451 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
452 {
453 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
454 ibase = TREE_OPERAND (ibase, 0);
455 }
456 if (cst_and_fits_in_hwi (ibase))
457 {
458 idelta += int_cst_value (ibase);
459 ibase = build_int_cst (TREE_TYPE (ibase), 0);
460 }
461
462 if (TREE_CODE (base) == ARRAY_REF)
463 {
464 stepsize = array_ref_element_size (base);
465 if (!cst_and_fits_in_hwi (stepsize))
466 return false;
467 imult = int_cst_value (stepsize);
468 step = fold_build2 (MULT_EXPR, sizetype,
469 fold_convert (sizetype, step),
470 fold_convert (sizetype, stepsize));
471 idelta *= imult;
472 }
473
474 if (*ar_data->step == NULL_TREE)
475 *ar_data->step = step;
476 else
477 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
478 fold_convert (sizetype, *ar_data->step),
479 fold_convert (sizetype, step));
480 *ar_data->delta += idelta;
481 *index = ibase;
482
483 return true;
484 }
485
486 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
487 STEP are integer constants and iter is the number of iterations of LOOP. The
488 reference occurs in statement STMT. Strips nonaddressable component
489 references from REF_P. */
490
491 static bool
492 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
493 tree *step, HOST_WIDE_INT *delta,
494 gimple stmt)
495 {
496 struct ar_data ar_data;
497 tree off;
498 HOST_WIDE_INT bit_offset;
499 tree ref = *ref_p;
500
501 *step = NULL_TREE;
502 *delta = 0;
503
504 /* First strip off the component references. Ignore bitfields.
505 Also strip off the real and imaginary parts of a complex, so that
506 they can have the same base. */
507 if (TREE_CODE (ref) == REALPART_EXPR
508 || TREE_CODE (ref) == IMAGPART_EXPR
509 || (TREE_CODE (ref) == COMPONENT_REF
510 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
511 {
512 if (TREE_CODE (ref) == IMAGPART_EXPR)
513 *delta += int_size_in_bytes (TREE_TYPE (ref));
514 ref = TREE_OPERAND (ref, 0);
515 }
516
517 *ref_p = ref;
518
519 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
520 {
521 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
522 bit_offset = TREE_INT_CST_LOW (off);
523 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
524
525 *delta += bit_offset / BITS_PER_UNIT;
526 }
527
528 *base = unshare_expr (ref);
529 ar_data.loop = loop;
530 ar_data.stmt = stmt;
531 ar_data.step = step;
532 ar_data.delta = delta;
533 return for_each_index (base, idx_analyze_ref, &ar_data);
534 }
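
/* As an illustration of the decomposition performed by analyze_ref
   above (a sketch, assuming "int a[100];" with 4-byte elements and a
   loop counter i starting at 0 with step 1): for the reference
   a[4 * i + 3] we obtain STEP = 16 and DELTA = 12, with BASE being the
   array accessed at index 0, i.e. the address is &a[0] + 16 * iter + 12.  */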
535
536 /* Record a memory reference REF to the list REFS. The reference occurs in
537 LOOP in statement STMT and is a write if WRITE_P is true. Returns true if the
538 reference was recorded, false otherwise. */
539
540 static bool
541 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
542 tree ref, bool write_p, gimple stmt)
543 {
544 tree base, step;
545 HOST_WIDE_INT delta;
546 struct mem_ref_group *agrp;
547
548 if (get_base_address (ref) == NULL)
549 return false;
550
551 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
552 return false;
553 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
554 if (step == NULL_TREE)
555 return false;
556
557 /* Stop if the address of BASE could not be taken. */
558 if (may_be_nonaddressable_p (base))
559 return false;
560
561 /* Limit non-constant step prefetching only to the innermost loops and
562 only when the step is loop invariant in the entire loop nest. */
563 if (!cst_and_fits_in_hwi (step))
564 {
565 if (loop->inner != NULL)
566 {
567 if (dump_file && (dump_flags & TDF_DETAILS))
568 {
569 fprintf (dump_file, "Memory expression %p\n",(void *) ref );
570 print_generic_expr (dump_file, ref, TDF_TREE);
571 fprintf (dump_file,":");
572 dump_mem_details (dump_file, base, step, delta, write_p);
573 fprintf (dump_file,
574 "Ignoring %p, non-constant step prefetching is "
575 "limited to inner most loops \n",
576 (void *) ref);
577 }
578 return false;
579 }
580 else
581 {
582 if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
583 {
584 if (dump_file && (dump_flags & TDF_DETAILS))
585 {
586 fprintf (dump_file, "Memory expression %p\n",(void *) ref );
587 print_generic_expr (dump_file, ref, TDF_TREE);
588 fprintf (dump_file,":");
589 dump_mem_details (dump_file, base, step, delta, write_p);
590 fprintf (dump_file,
591 "Not prefetching, ignoring %p due to "
592 "loop variant step\n",
593 (void *) ref);
594 }
595 return false;
596 }
597 }
598 }
599
600 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA is an
601 integer constant and STEP is either an integer constant or loop invariant. */
602 agrp = find_or_create_group (refs, base, step);
603 record_ref (agrp, stmt, ref, delta, write_p);
604
605 return true;
606 }
607
608 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
609 true if there are no other memory references inside the loop. *REF_COUNT is set to the number of memory references found. */
610
611 static struct mem_ref_group *
612 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
613 {
614 basic_block *body = get_loop_body_in_dom_order (loop);
615 basic_block bb;
616 unsigned i;
617 gimple_stmt_iterator bsi;
618 gimple stmt;
619 tree lhs, rhs;
620 struct mem_ref_group *refs = NULL;
621
622 *no_other_refs = true;
623 *ref_count = 0;
624
625 /* Scan the loop body in order, so that the former references precede the
626 later ones. */
627 for (i = 0; i < loop->num_nodes; i++)
628 {
629 bb = body[i];
630 if (bb->loop_father != loop)
631 continue;
632
633 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
634 {
635 stmt = gsi_stmt (bsi);
636
637 if (gimple_code (stmt) != GIMPLE_ASSIGN)
638 {
639 if (gimple_vuse (stmt)
640 || (is_gimple_call (stmt)
641 && !(gimple_call_flags (stmt) & ECF_CONST)))
642 *no_other_refs = false;
643 continue;
644 }
645
646 lhs = gimple_assign_lhs (stmt);
647 rhs = gimple_assign_rhs1 (stmt);
648
649 if (REFERENCE_CLASS_P (rhs))
650 {
651 *no_other_refs &= gather_memory_references_ref (loop, &refs,
652 rhs, false, stmt);
653 *ref_count += 1;
654 }
655 if (REFERENCE_CLASS_P (lhs))
656 {
657 *no_other_refs &= gather_memory_references_ref (loop, &refs,
658 lhs, true, stmt);
659 *ref_count += 1;
660 }
661 }
662 }
663 free (body);
664
665 return refs;
666 }
667
668 /* Prune the prefetch candidate REF using self-reuse. */
669
670 static void
671 prune_ref_by_self_reuse (struct mem_ref *ref)
672 {
673 HOST_WIDE_INT step;
674 bool backward;
675
676 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
677 if (!cst_and_fits_in_hwi (ref->group->step))
678 return;
679
680 step = int_cst_value (ref->group->step);
681
682 backward = step < 0;
683
684 if (step == 0)
685 {
686 /* Prefetch references to invariant address just once. */
687 ref->prefetch_before = 1;
688 return;
689 }
690
691 if (backward)
692 step = -step;
693
694 if (step > PREFETCH_BLOCK)
695 return;
696
697 if ((backward && HAVE_BACKWARD_PREFETCH)
698 || (!backward && HAVE_FORWARD_PREFETCH))
699 {
700 ref->prefetch_before = 1;
701 return;
702 }
703
704 ref->prefetch_mod = PREFETCH_BLOCK / step;
705 }
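
/* For example, reference (3) from the overview comment, a[16 * i] with
   1-byte elements, a 64-byte prefetch block and no hardware sequential
   prefetch: the step is 16, four consecutive iterations touch the same
   cache line, and prefetch_mod becomes 64 / 16 = 4.  A step of zero
   (loop-invariant address) instead yields prefetch_before = 1.  */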
706
707 /* Divides X by BY, rounding down. */
708
709 static HOST_WIDE_INT
710 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
711 {
712 gcc_assert (by > 0);
713
714 if (x >= 0)
715 return x / by;
716 else
717 return (x + by - 1) / by;
718 }
719
720 /* Given a CACHE_LINE_SIZE and two inductive memory references
721 with a common STEP greater than CACHE_LINE_SIZE and an address
722 difference DELTA, compute the probability that they will fall
723 in different cache lines. Return true if the computed miss rate
724 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
725 number of distinct iterations after which the pattern repeats itself.
726 ALIGN_UNIT is the unit of alignment in bytes. */
727
728 static bool
729 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
730 HOST_WIDE_INT step, HOST_WIDE_INT delta,
731 unsigned HOST_WIDE_INT distinct_iters,
732 int align_unit)
733 {
734 unsigned align, iter;
735 int total_positions, miss_positions, max_allowed_miss_positions;
736 int address1, address2, cache_line1, cache_line2;
737
738 /* It always misses if delta is greater than or equal to the cache
739 line size. */
740 if (delta >= (HOST_WIDE_INT) cache_line_size)
741 return false;
742
743 miss_positions = 0;
744 total_positions = (cache_line_size / align_unit) * distinct_iters;
745 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
746
747 /* Iterate through all possible alignments of the first
748 memory reference within its cache line. */
749 for (align = 0; align < cache_line_size; align += align_unit)
750
751 /* Iterate through all distinct iterations. */
752 for (iter = 0; iter < distinct_iters; iter++)
753 {
754 address1 = align + step * iter;
755 address2 = address1 + delta;
756 cache_line1 = address1 / cache_line_size;
757 cache_line2 = address2 / cache_line_size;
758 if (cache_line1 != cache_line2)
759 {
760 miss_positions += 1;
761 if (miss_positions > max_allowed_miss_positions)
762 return false;
763 }
764 }
765 return true;
766 }
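
/* A worked example (assuming a 64-byte prefetch block and a 4-byte
   alignment unit): for STEP = 128 and DELTA = 32 the caller passes
   DISTINCT_ITERS = 1, so 16 starting alignments are simulated.  The 8
   alignments at byte offsets 32 through 60 place the two addresses on
   different cache lines, a 50% miss rate; the default
   ACCEPTABLE_MISS_RATE of 50 tolerates only 5%, so the function returns
   false and the caller keeps issuing prefetches for both references.  */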
767
768 /* Prune the prefetch candidate REF using the reuse with BY.
769 If BY_IS_BEFORE is true, BY is before REF in the loop. */
770
771 static void
772 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
773 bool by_is_before)
774 {
775 HOST_WIDE_INT step;
776 bool backward;
777 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
778 HOST_WIDE_INT delta = delta_b - delta_r;
779 HOST_WIDE_INT hit_from;
780 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
781 HOST_WIDE_INT reduced_step;
782 unsigned HOST_WIDE_INT reduced_prefetch_block;
783 tree ref_type;
784 int align_unit;
785
786 /* If the step is non-constant we cannot calculate prefetch_before. */
787 if (!cst_and_fits_in_hwi (ref->group->step)) {
788 return;
789 }
790
791 step = int_cst_value (ref->group->step);
792
793 backward = step < 0;
794
795
796 if (delta == 0)
797 {
798 /* If the references have the same address, only prefetch the
799 former one. */
800 if (by_is_before)
801 ref->prefetch_before = 0;
802
803 return;
804 }
805
806 if (!step)
807 {
808 /* If the reference addresses are invariant and fall into the
809 same cache line, prefetch just the first one. */
810 if (!by_is_before)
811 return;
812
813 if (ddown (ref->delta, PREFETCH_BLOCK)
814 != ddown (by->delta, PREFETCH_BLOCK))
815 return;
816
817 ref->prefetch_before = 0;
818 return;
819 }
820
821 /* Only prune the reference that is behind in the array. */
822 if (backward)
823 {
824 if (delta > 0)
825 return;
826
827 /* Transform the data so that we may assume that the accesses
828 are forward. */
829 delta = - delta;
830 step = -step;
831 delta_r = PREFETCH_BLOCK - 1 - delta_r;
832 delta_b = PREFETCH_BLOCK - 1 - delta_b;
833 }
834 else
835 {
836 if (delta < 0)
837 return;
838 }
839
840 /* Check whether the two references are likely to hit the same cache
841 line, and how far apart from each other the iterations in which
842 that occurs are. */
843
844 if (step <= PREFETCH_BLOCK)
845 {
846 /* The accesses are sure to meet. Let us check when. */
847 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
848 prefetch_before = (hit_from - delta_r + step - 1) / step;
849
850 /* Do not reduce prefetch_before if we meet beyond cache size. */
851 if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
852 prefetch_before = PREFETCH_ALL;
853 if (prefetch_before < ref->prefetch_before)
854 ref->prefetch_before = prefetch_before;
855
856 return;
857 }
858
859 /* A more complicated case with step > prefetch_block. First reduce
860 the ratio between the step and the cache line size to its simplest
861 terms. The resulting denominator will then represent the number of
862 distinct iterations after which each address will go back to its
863 initial location within the cache line. This computation assumes
864 that PREFETCH_BLOCK is a power of two. */
865 prefetch_block = PREFETCH_BLOCK;
866 reduced_prefetch_block = prefetch_block;
867 reduced_step = step;
868 while ((reduced_step & 1) == 0
869 && reduced_prefetch_block > 1)
870 {
871 reduced_step >>= 1;
872 reduced_prefetch_block >>= 1;
873 }
874
875 prefetch_before = delta / step;
876 delta %= step;
877 ref_type = TREE_TYPE (ref->mem);
878 align_unit = TYPE_ALIGN (ref_type) / 8;
879 if (is_miss_rate_acceptable (prefetch_block, step, delta,
880 reduced_prefetch_block, align_unit))
881 {
882 /* Do not reduce prefetch_before if we meet beyond cache size. */
883 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
884 prefetch_before = PREFETCH_ALL;
885 if (prefetch_before < ref->prefetch_before)
886 ref->prefetch_before = prefetch_before;
887
888 return;
889 }
890
891 /* Try also the following iteration. */
892 prefetch_before++;
893 delta = step - delta;
894 if (is_miss_rate_acceptable (prefetch_block, step, delta,
895 reduced_prefetch_block, align_unit))
896 {
897 if (prefetch_before < ref->prefetch_before)
898 ref->prefetch_before = prefetch_before;
899
900 return;
901 }
902
903 /* REF probably does not reuse BY. */
904 return;
905 }
906
907 /* Prune the prefetch candidate REF using the reuses with other references
908 in REFS. */
909
910 static void
911 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
912 {
913 struct mem_ref *prune_by;
914 bool before = true;
915
916 prune_ref_by_self_reuse (ref);
917
918 for (prune_by = refs; prune_by; prune_by = prune_by->next)
919 {
920 if (prune_by == ref)
921 {
922 before = false;
923 continue;
924 }
925
926 if (!WRITE_CAN_USE_READ_PREFETCH
927 && ref->write_p
928 && !prune_by->write_p)
929 continue;
930 if (!READ_CAN_USE_WRITE_PREFETCH
931 && !ref->write_p
932 && prune_by->write_p)
933 continue;
934
935 prune_ref_by_group_reuse (ref, prune_by, before);
936 }
937 }
938
939 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
940
941 static void
942 prune_group_by_reuse (struct mem_ref_group *group)
943 {
944 struct mem_ref *ref_pruned;
945
946 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
947 {
948 prune_ref_by_reuse (ref_pruned, group->refs);
949
950 if (dump_file && (dump_flags & TDF_DETAILS))
951 {
952 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
953
954 if (ref_pruned->prefetch_before == PREFETCH_ALL
955 && ref_pruned->prefetch_mod == 1)
956 fprintf (dump_file, " no restrictions");
957 else if (ref_pruned->prefetch_before == 0)
958 fprintf (dump_file, " do not prefetch");
959 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
960 fprintf (dump_file, " prefetch once");
961 else
962 {
963 if (ref_pruned->prefetch_before != PREFETCH_ALL)
964 {
965 fprintf (dump_file, " prefetch before ");
966 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
967 ref_pruned->prefetch_before);
968 }
969 if (ref_pruned->prefetch_mod != 1)
970 {
971 fprintf (dump_file, " prefetch mod ");
972 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
973 ref_pruned->prefetch_mod);
974 }
975 }
976 fprintf (dump_file, "\n");
977 }
978 }
979 }
980
981 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
982
983 static void
984 prune_by_reuse (struct mem_ref_group *groups)
985 {
986 for (; groups; groups = groups->next)
987 prune_group_by_reuse (groups);
988 }
989
990 /* Returns true if we should issue prefetch for REF. */
991
992 static bool
993 should_issue_prefetch_p (struct mem_ref *ref)
994 {
995 /* For now, do not issue prefetches that would only be useful for the
996 first few iterations. */
997 if (ref->prefetch_before != PREFETCH_ALL)
998 {
999 if (dump_file && (dump_flags & TDF_DETAILS))
1000 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
1001 (void *) ref);
1002 return false;
1003 }
1004
1005 /* Do not prefetch nontemporal stores. */
1006 if (ref->storent_p)
1007 {
1008 if (dump_file && (dump_flags & TDF_DETAILS))
1009 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
1010 return false;
1011 }
1012
1013 return true;
1014 }
1015
1016 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1017 AHEAD is the number of iterations to prefetch ahead (which corresponds
1018 to the number of simultaneous instances of one prefetch running at a
1019 time). UNROLL_FACTOR is the factor by which the loop is going to be
1020 unrolled. Returns true if there is anything to prefetch. */
1021
1022 static bool
1023 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1024 unsigned ahead)
1025 {
1026 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1027 unsigned slots_per_prefetch;
1028 struct mem_ref *ref;
1029 bool any = false;
1030
1031 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1032 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1033
1034 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1035 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1036 it will need a prefetch slot. */
1037 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
1038 if (dump_file && (dump_flags & TDF_DETAILS))
1039 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1040 slots_per_prefetch);
1041
1042 /* For now we just take memory references one by one and issue
1043 prefetches for as many as possible. The groups are sorted
1044 starting with the largest step, since the references with
1045 large step are more likely to cause many cache misses. */
1046
1047 for (; groups; groups = groups->next)
1048 for (ref = groups->refs; ref; ref = ref->next)
1049 {
1050 if (!should_issue_prefetch_p (ref))
1051 continue;
1052
1053 /* The loop is far from being sufficiently unrolled for this
1054 prefetch. Do not generate the prefetch, to avoid many redundant
1055 prefetches. */
1056 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1057 continue;
1058
1059 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1060 and we unroll the loop UNROLL_FACTOR times, we need to insert
1061 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1062 iteration. */
1063 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1064 / ref->prefetch_mod);
1065 prefetch_slots = n_prefetches * slots_per_prefetch;
1066
1067 /* If more than half of the prefetches would be lost anyway, do not
1068 issue the prefetch. */
1069 if (2 * remaining_prefetch_slots < prefetch_slots)
1070 continue;
1071
1072 ref->issue_prefetch_p = true;
1073
1074 if (remaining_prefetch_slots <= prefetch_slots)
1075 return true;
1076 remaining_prefetch_slots -= prefetch_slots;
1077 any = true;
1078 }
1079
1080 return any;
1081 }
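
/* A small worked example (the numbers are illustrative;
   SIMULTANEOUS_PREFETCHES is target-dependent): with AHEAD = 8,
   UNROLL_FACTOR = 4 and 6 available slots, each prefetch instruction
   occupies (8 + 2) / 4 = 2 slots.  A reference with prefetch_mod 4
   needs one prefetch per unrolled body (2 slots; 4 remain), while a
   following reference with prefetch_mod 1 needs four prefetches
   (8 slots); since no more than half of those would be lost
   (2 * 4 >= 8), it is still issued and the slot budget is exhausted.  */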
1082
1083 /* Return TRUE if no prefetch is going to be generated in the given
1084 GROUPS. */
1085
1086 static bool
1087 nothing_to_prefetch_p (struct mem_ref_group *groups)
1088 {
1089 struct mem_ref *ref;
1090
1091 for (; groups; groups = groups->next)
1092 for (ref = groups->refs; ref; ref = ref->next)
1093 if (should_issue_prefetch_p (ref))
1094 return false;
1095
1096 return true;
1097 }
1098
1099 /* Estimate the number of prefetches in the given GROUPS.
1100 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1101
1102 static int
1103 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1104 {
1105 struct mem_ref *ref;
1106 unsigned n_prefetches;
1107 int prefetch_count = 0;
1108
1109 for (; groups; groups = groups->next)
1110 for (ref = groups->refs; ref; ref = ref->next)
1111 if (should_issue_prefetch_p (ref))
1112 {
1113 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1114 / ref->prefetch_mod);
1115 prefetch_count += n_prefetches;
1116 }
1117
1118 return prefetch_count;
1119 }
1120
1121 /* Issue prefetches for the reference REF into the loop as decided before.
1122 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1123 is the factor by which the loop was unrolled. */
1124
1125 static void
1126 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1127 {
1128 HOST_WIDE_INT delta;
1129 tree addr, addr_base, write_p, local, forward;
1130 gimple prefetch;
1131 gimple_stmt_iterator bsi;
1132 unsigned n_prefetches, ap;
1133 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1134
1135 if (dump_file && (dump_flags & TDF_DETAILS))
1136 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1137 nontemporal ? " nontemporal" : "",
1138 (void *) ref);
1139
1140 bsi = gsi_for_stmt (ref->stmt);
1141
1142 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1143 / ref->prefetch_mod);
1144 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1145 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1146 true, NULL, true, GSI_SAME_STMT);
1147 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1148 local = nontemporal ? integer_zero_node : integer_three_node;
1149
1150 for (ap = 0; ap < n_prefetches; ap++)
1151 {
1152 if (cst_and_fits_in_hwi (ref->group->step))
1153 {
1154 /* Determine the address to prefetch. */
1155 delta = (ahead + ap * ref->prefetch_mod) *
1156 int_cst_value (ref->group->step);
1157 addr = fold_build_pointer_plus_hwi (addr_base, delta);
1158 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1159 true, GSI_SAME_STMT);
1160 }
1161 else
1162 {
1163 /* The step size is non-constant but loop-invariant. We use the
1164 heuristic of simply prefetching AHEAD iterations ahead. */
1165 forward = fold_build2 (MULT_EXPR, sizetype,
1166 fold_convert (sizetype, ref->group->step),
1167 fold_convert (sizetype, size_int (ahead)));
1168 addr = fold_build_pointer_plus (addr_base, forward);
1169 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1170 NULL, true, GSI_SAME_STMT);
1171 }
1172 /* Create the prefetch instruction. */
1173 prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1174 3, addr, write_p, local);
1175 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1176 }
1177 }
1178
1179 /* Issue prefetches for the references in GROUPS into the loop as decided before.
1180 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1181 factor by which LOOP was unrolled. */
1182
1183 static void
1184 issue_prefetches (struct mem_ref_group *groups,
1185 unsigned unroll_factor, unsigned ahead)
1186 {
1187 struct mem_ref *ref;
1188
1189 for (; groups; groups = groups->next)
1190 for (ref = groups->refs; ref; ref = ref->next)
1191 if (ref->issue_prefetch_p)
1192 issue_prefetch_ref (ref, unroll_factor, ahead);
1193 }
1194
1195 /* Returns true if REF is a memory write for which a nontemporal store insn
1196 can be used. */
1197
1198 static bool
1199 nontemporal_store_p (struct mem_ref *ref)
1200 {
1201 enum machine_mode mode;
1202 enum insn_code code;
1203
1204 /* REF must be a write that is not reused. We require it to be independent
1205 of all other memory references in the loop, as the nontemporal stores may
1206 be reordered with respect to other memory references. */
1207 if (!ref->write_p
1208 || !ref->independent_p
1209 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1210 return false;
1211
1212 /* Check that we have the storent instruction for the mode. */
1213 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1214 if (mode == BLKmode)
1215 return false;
1216
1217 code = optab_handler (storent_optab, mode);
1218 return code != CODE_FOR_nothing;
1219 }
1220
1221 /* If REF is a nontemporal store, we mark the corresponding modify statement
1222 and return true. Otherwise, we return false. */
1223
1224 static bool
1225 mark_nontemporal_store (struct mem_ref *ref)
1226 {
1227 if (!nontemporal_store_p (ref))
1228 return false;
1229
1230 if (dump_file && (dump_flags & TDF_DETAILS))
1231 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1232 (void *) ref);
1233
1234 gimple_assign_set_nontemporal_move (ref->stmt, true);
1235 ref->storent_p = true;
1236
1237 return true;
1238 }
1239
1240 /* Issue a memory fence instruction after LOOP. */
1241
1242 static void
1243 emit_mfence_after_loop (struct loop *loop)
1244 {
1245 vec<edge> exits = get_loop_exit_edges (loop);
1246 edge exit;
1247 gimple call;
1248 gimple_stmt_iterator bsi;
1249 unsigned i;
1250
1251 FOR_EACH_VEC_ELT (exits, i, exit)
1252 {
1253 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1254
1255 if (!single_pred_p (exit->dest)
1256 /* If possible, we prefer not to insert the fence on other paths
1257 in cfg. */
1258 && !(exit->flags & EDGE_ABNORMAL))
1259 split_loop_exit_edge (exit);
1260 bsi = gsi_after_labels (exit->dest);
1261
1262 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1263 }
1264
1265 exits.release ();
1266 update_ssa (TODO_update_ssa_only_virtuals);
1267 }
1268
1269 /* Returns true if we can use storent in loop, false otherwise. */
1270
1271 static bool
1272 may_use_storent_in_loop_p (struct loop *loop)
1273 {
1274 bool ret = true;
1275
1276 if (loop->inner != NULL)
1277 return false;
1278
1279 /* If we must issue a mfence insn after using storent, check that there
1280 is a suitable place for it at each of the loop exits. */
1281 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1282 {
1283 vec<edge> exits = get_loop_exit_edges (loop);
1284 unsigned i;
1285 edge exit;
1286
1287 FOR_EACH_VEC_ELT (exits, i, exit)
1288 if ((exit->flags & EDGE_ABNORMAL)
1289 && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1290 ret = false;
1291
1292 exits.release ();
1293 }
1294
1295 return ret;
1296 }
1297
1298 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1299 references in the loop. */
1300
1301 static void
1302 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1303 {
1304 struct mem_ref *ref;
1305 bool any = false;
1306
1307 if (!may_use_storent_in_loop_p (loop))
1308 return;
1309
1310 for (; groups; groups = groups->next)
1311 for (ref = groups->refs; ref; ref = ref->next)
1312 any |= mark_nontemporal_store (ref);
1313
1314 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1315 emit_mfence_after_loop (loop);
1316 }
1317
1318 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1319 this is the case, fills in DESC with the description of the number of
1320 iterations. */
1321
1322 static bool
1323 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1324 unsigned factor)
1325 {
1326 if (!can_unroll_loop_p (loop, factor, desc))
1327 return false;
1328
1329 /* We only consider loops without control flow for unrolling. This is not
1330 a hard restriction -- tree_unroll_loop works with arbitrary loops
1331 as well; but the unrolling/prefetching is usually more profitable for
1332 loops consisting of a single basic block, and we want to limit the
1333 code growth. */
1334 if (loop->num_nodes > 2)
1335 return false;
1336
1337 return true;
1338 }
1339
1340 /* Determine the factor by which to unroll LOOP, from the information
1341 contained in the list of memory references REFS. The description of the
1342 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1343 insns of the LOOP. EST_NITER is the estimated number of iterations of
1344 the loop, or -1 if no estimate is available. */
1345
1346 static unsigned
1347 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1348 unsigned ninsns, struct tree_niter_desc *desc,
1349 HOST_WIDE_INT est_niter)
1350 {
1351 unsigned upper_bound;
1352 unsigned nfactor, factor, mod_constraint;
1353 struct mem_ref_group *agp;
1354 struct mem_ref *ref;
1355
1356 /* First check whether the loop is not too large to unroll. We ignore
1357 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1358 from unrolling them enough to make exactly one cache line covered by each
1359 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1360 us from unrolling the loops too many times in cases where we only expect
1361 gains from better scheduling and decreasing loop overhead, which is not
1362 the case here. */
1363 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1364
1365 /* If we unrolled the loop more times than it iterates, the unrolled version
1366 of the loop would never be entered. */
1367 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1368 upper_bound = est_niter;
1369
1370 if (upper_bound <= 1)
1371 return 1;
1372
1373 /* Choose the factor so that we may prefetch each cache line just once,
1374 but bound the unrolling by UPPER_BOUND. */
1375 factor = 1;
1376 for (agp = refs; agp; agp = agp->next)
1377 for (ref = agp->refs; ref; ref = ref->next)
1378 if (should_issue_prefetch_p (ref))
1379 {
1380 mod_constraint = ref->prefetch_mod;
1381 nfactor = least_common_multiple (mod_constraint, factor);
1382 if (nfactor <= upper_bound)
1383 factor = nfactor;
1384 }
1385
1386 if (!should_unroll_loop_p (loop, desc, factor))
1387 return 1;
1388
1389 return factor;
1390 }
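
/* For instance (illustrative numbers): with references whose
   prefetch_mod values are 4 and 16 and an UPPER_BOUND of 8, the first
   reference raises the factor to lcm (4, 1) = 4; the second would
   require lcm (16, 4) = 16, which exceeds the bound, so the chosen
   factor remains 4 (subject to should_unroll_loop_p).  */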
1391
1392 /* Returns the total volume of the memory references REFS, taking into account
1393 reuses in the innermost loop and cache line size. TODO -- we should also
1394 take into account reuses across the iterations of the loops in the loop
1395 nest. */
1396
1397 static unsigned
1398 volume_of_references (struct mem_ref_group *refs)
1399 {
1400 unsigned volume = 0;
1401 struct mem_ref_group *gr;
1402 struct mem_ref *ref;
1403
1404 for (gr = refs; gr; gr = gr->next)
1405 for (ref = gr->refs; ref; ref = ref->next)
1406 {
1407 /* Almost always reuses another value? */
1408 if (ref->prefetch_before != PREFETCH_ALL)
1409 continue;
1410
1411 /* If several iterations access the same cache line, use the size of
1412 the line divided by this number. Otherwise, a cache line is
1413 accessed in each iteration. TODO -- in the latter case, we should
1414 take the size of the reference into account, rounding it up to a
1415 multiple of the cache line size. */
1416 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1417 }
1418 return volume;
1419 }
1420
1421 /* Returns the volume of memory references accessed across VEC iterations of
1422 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1423 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1424
1425 static unsigned
1426 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1427 {
1428 unsigned i;
1429
1430 for (i = 0; i < n; i++)
1431 if (vec[i] != 0)
1432 break;
1433
1434 if (i == n)
1435 return 0;
1436
1437 gcc_assert (vec[i] > 0);
1438
1439 /* We ignore the parts of the distance vector in subloops, since usually
1440 the numbers of iterations are much smaller. */
1441 return loop_sizes[i] * vec[i];
1442 }
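
/* For example, with a two-level nest where LOOP_SIZES = { 4096, 256 }
   (bytes accessed per iteration of the outer resp. inner loop,
   illustrative values) and a distance vector (1, 3), the reuse occurs
   one outer-loop iteration later, so roughly 4096 * 1 bytes are
   accessed in between; the inner component of the vector is ignored.  */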
1443
1444 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1445 at the position corresponding to the loop of the step. N is the depth
1446 of the considered loop nest, and LOOP is its innermost loop. */
1447
1448 static void
1449 add_subscript_strides (tree access_fn, unsigned stride,
1450 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1451 {
1452 struct loop *aloop;
1453 tree step;
1454 HOST_WIDE_INT astep;
1455 unsigned min_depth = loop_depth (loop) - n;
1456
1457 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1458 {
1459 aloop = get_chrec_loop (access_fn);
1460 step = CHREC_RIGHT (access_fn);
1461 access_fn = CHREC_LEFT (access_fn);
1462
1463 if ((unsigned) loop_depth (aloop) <= min_depth)
1464 continue;
1465
1466 if (tree_fits_shwi_p (step))
1467 astep = tree_to_shwi (step);
1468 else
1469 astep = L1_CACHE_LINE_SIZE;
1470
1471 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1472
1473 }
1474 }
1475
1476 /* Returns the volume of memory references accessed between two consecutive
1477 self-reuses of the reference DR. We consider the subscripts of DR in N
1478 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1479 loops. LOOP is the innermost loop of the current loop nest. */
1480
1481 static unsigned
1482 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1483 struct loop *loop)
1484 {
1485 tree stride, access_fn;
1486 HOST_WIDE_INT *strides, astride;
1487 vec<tree> access_fns;
1488 tree ref = DR_REF (dr);
1489 unsigned i, ret = ~0u;
1490
1491 /* In the following example:
1492
1493 for (i = 0; i < N; i++)
1494 for (j = 0; j < N; j++)
1495 use (a[j][i]);
1496 the same cache line is accessed each N steps (except if the change from
1497 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1498 we cannot rely purely on the results of the data dependence analysis.
1499
1500 Instead, we compute the stride of the reference in each loop, and consider
1501 the innermost loop in which the stride is less than the cache line size. */
1502
1503 strides = XCNEWVEC (HOST_WIDE_INT, n);
1504 access_fns = DR_ACCESS_FNS (dr);
1505
1506 FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1507 {
1508 /* Keep track of the reference corresponding to the subscript, so that we
1509 know its stride. */
1510 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1511 ref = TREE_OPERAND (ref, 0);
1512
1513 if (TREE_CODE (ref) == ARRAY_REF)
1514 {
1515 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1516 if (tree_fits_uhwi_p (stride))
1517 astride = tree_to_uhwi (stride);
1518 else
1519 astride = L1_CACHE_LINE_SIZE;
1520
1521 ref = TREE_OPERAND (ref, 0);
1522 }
1523 else
1524 astride = 1;
1525
1526 add_subscript_strides (access_fn, astride, strides, n, loop);
1527 }
1528
1529 for (i = n; i-- > 0; )
1530 {
1531 unsigned HOST_WIDE_INT s;
1532
1533 s = strides[i] < 0 ? -strides[i] : strides[i];
1534
1535 if (s < (unsigned) L1_CACHE_LINE_SIZE
1536 && (loop_sizes[i]
1537 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1538 {
1539 ret = loop_sizes[i];
1540 break;
1541 }
1542 }
1543
1544 free (strides);
1545 return ret;
1546 }
1547
1548 /* Determines the distance till the first reuse of each reference in REFS
1549 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1550 memory references in the loop. Return false if the analysis fails. */
1551
1552 static bool
1553 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1554 bool no_other_refs)
1555 {
1556 struct loop *nest, *aloop;
1557 vec<data_reference_p> datarefs = vNULL;
1558 vec<ddr_p> dependences = vNULL;
1559 struct mem_ref_group *gr;
1560 struct mem_ref *ref, *refb;
1561 vec<loop_p> vloops = vNULL;
1562 unsigned *loop_data_size;
1563 unsigned i, j, n;
1564 unsigned volume, dist, adist;
1565 HOST_WIDE_INT vol;
1566 data_reference_p dr;
1567 ddr_p dep;
1568
1569 if (loop->inner)
1570 return true;
1571
1572 /* Find the outermost loop of the loop nest of loop (we require that
1573 there are no sibling loops inside the nest). */
1574 nest = loop;
1575 while (1)
1576 {
1577 aloop = loop_outer (nest);
1578
1579 if (aloop == current_loops->tree_root
1580 || aloop->inner->next)
1581 break;
1582
1583 nest = aloop;
1584 }
1585
1586 /* For each loop, determine the amount of data accessed in each iteration.
1587 We use this to estimate whether the reference is evicted from the
1588 cache before its reuse. */
1589 find_loop_nest (nest, &vloops);
1590 n = vloops.length ();
1591 loop_data_size = XNEWVEC (unsigned, n);
1592 volume = volume_of_references (refs);
1593 i = n;
1594 while (i-- != 0)
1595 {
1596 loop_data_size[i] = volume;
1597 /* Bound the volume by the L2 cache size, since above this bound,
1598 all dependence distances are equivalent. */
1599 if (volume > L2_CACHE_SIZE_BYTES)
1600 continue;
1601
1602 aloop = vloops[i];
1603 vol = estimated_stmt_executions_int (aloop);
1604 if (vol == -1)
1605 vol = expected_loop_iterations (aloop);
1606 volume *= vol;
1607 }
1608
1609 /* Prepare the references in the form suitable for data dependence
1610 analysis. We ignore unanalyzable data references (the results
1611 are used just as a heuristic to estimate the temporality of the
1612 references, hence we do not need to worry about correctness). */
1613 for (gr = refs; gr; gr = gr->next)
1614 for (ref = gr->refs; ref; ref = ref->next)
1615 {
1616 dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1617 ref->mem, ref->stmt, !ref->write_p);
1618
1619 if (dr)
1620 {
1621 ref->reuse_distance = volume;
1622 dr->aux = ref;
1623 datarefs.safe_push (dr);
1624 }
1625 else
1626 no_other_refs = false;
1627 }
1628
1629 FOR_EACH_VEC_ELT (datarefs, i, dr)
1630 {
1631 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1632 ref = (struct mem_ref *) dr->aux;
1633 if (ref->reuse_distance > dist)
1634 ref->reuse_distance = dist;
1635
1636 if (no_other_refs)
1637 ref->independent_p = true;
1638 }
1639
1640 if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1641 return false;
1642
1643 FOR_EACH_VEC_ELT (dependences, i, dep)
1644 {
1645 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1646 continue;
1647
1648 ref = (struct mem_ref *) DDR_A (dep)->aux;
1649 refb = (struct mem_ref *) DDR_B (dep)->aux;
1650
1651 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1652 || DDR_NUM_DIST_VECTS (dep) == 0)
1653 {
1654 /* If the dependence cannot be analyzed, assume that there might be
1655 a reuse. */
1656 dist = 0;
1657
1658 ref->independent_p = false;
1659 refb->independent_p = false;
1660 }
1661 else
1662 {
1663 /* The distance vectors are normalized to be always lexicographically
1664 positive, hence we cannot tell just from them whether DDR_A comes
1665 before DDR_B or vice versa. However, it is not important,
1666 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1667 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1668 in cache (and marking it as nontemporal would not affect
1669 anything). */
1670
1671 dist = volume;
1672 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1673 {
1674 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1675 loop_data_size, n);
1676
1677 /* If this is a dependence in the innermost loop (i.e., the
1678 distances in all superloops are zero) and it is not
1679 the trivial self-dependence with distance zero, record that
1680 the references are not completely independent. */
1681 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1682 && (ref != refb
1683 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1684 {
1685 ref->independent_p = false;
1686 refb->independent_p = false;
1687 }
1688
1689 /* Ignore accesses closer than
1690 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1691 so that we use nontemporal prefetches e.g. if a single memory
1692 location is accessed several times in a single iteration of
1693 the loop. */
1694 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1695 continue;
1696
1697 if (adist < dist)
1698 dist = adist;
1699 }
1700 }
1701
1702 if (ref->reuse_distance > dist)
1703 ref->reuse_distance = dist;
1704 if (refb->reuse_distance > dist)
1705 refb->reuse_distance = dist;
1706 }
1707
1708 free_dependence_relations (dependences);
1709 free_data_refs (datarefs);
1710 free (loop_data_size);
1711
1712 if (dump_file && (dump_flags & TDF_DETAILS))
1713 {
1714 fprintf (dump_file, "Reuse distances:\n");
1715 for (gr = refs; gr; gr = gr->next)
1716 for (ref = gr->refs; ref; ref = ref->next)
1717 fprintf (dump_file, " ref %p distance %u\n",
1718 (void *) ref, ref->reuse_distance);
1719 }
1720
1721 return true;
1722 }
1723
1724 /* Determine whether or not the trip count to ahead ratio is too small based
1725 on profitability considerations.
1726 AHEAD: the iteration ahead distance,
1727 EST_NITER: the estimated trip count. */
1728
1729 static bool
1730 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1731 {
1732 /* Assume trip count to ahead ratio is big enough if the trip count could not
1733 be estimated at compile time. */
1734 if (est_niter < 0)
1735 return false;
1736
1737 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1738 {
1739 if (dump_file && (dump_flags & TDF_DETAILS))
1740 fprintf (dump_file,
1741 "Not prefetching -- loop estimated to roll only %d times\n",
1742 (int) est_niter);
1743 return true;
1744 }
1745
1746 return false;
1747 }
1748
1749 /* Determine whether or not the number of memory references in the loop is
1750 reasonable based on the profitability and compilation-time considerations.
1751 NINSNS: estimated number of instructions in the loop,
1752 MEM_REF_COUNT: total number of memory references in the loop. */
1753
1754 static bool
1755 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1756 {
1757 int insn_to_mem_ratio;
1758
1759 if (mem_ref_count == 0)
1760 return false;
1761
1762 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1763 (compute_all_dependences) have high costs based on quadratic complexity.
1764 To avoid huge compilation time, we give up prefetching if mem_ref_count
1765 is too large. */
1766 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1767 return false;
1768
1769 /* Prefetching improves performance by overlapping cache missing
1770 memory accesses with CPU operations. If the loop does not have
1771 enough CPU operations to overlap with memory operations, prefetching
1772 won't give a significant benefit. One approximate way of checking
1773 this is to require the ratio of instructions to memory references to
1774 be above a certain limit. This approximation works well in practice.
1775 TODO: Implement a more precise computation by estimating the time
1776 for each CPU or memory op in the loop. Time estimates for memory ops
1777 should account for cache misses. */
1778 insn_to_mem_ratio = ninsns / mem_ref_count;
1779
1780 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1781 {
1782 if (dump_file && (dump_flags & TDF_DETAILS))
1783 fprintf (dump_file,
1784 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1785 insn_to_mem_ratio);
1786 return false;
1787 }
1788
1789 return true;
1790 }
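/* A sketch of the heuristic above with assumed numbers (the actual
   threshold comes from PREFETCH_MIN_INSN_TO_MEM_RATIO): a loop body
   of 20 insns containing 8 memory references gives a ratio of
   20 / 8 = 2; if the threshold were 3, prefetching would be
   rejected, whereas the same body with only 4 references (ratio 5)
   would pass this test.  */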
1791
1792 /* Determine whether or not the instruction to prefetch ratio in the loop is
1793 too small based on profitability considerations.
1794 NINSNS: estimated number of instructions in the loop,
1795 PREFETCH_COUNT: an estimate of the number of prefetches,
1796 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1797
1798 static bool
1799 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1800 unsigned unroll_factor)
1801 {
1802 int insn_to_prefetch_ratio;
1803
1804 /* Prefetching most likely causes performance degradation when the instruction
1805 to prefetch ratio is too small. Too many prefetch instructions in a loop
1806 may reduce the I-cache performance.
1807 (unroll_factor * ninsns) is used to estimate the number of instructions in
1808 the unrolled loop. This implementation is a bit simplistic -- the number
1809 of issued prefetch instructions is also affected by unrolling. So,
1810 prefetch_mod and the unroll factor should be taken into account when
1811 determining prefetch_count. Also, the number of insns of the unrolled
1812 loop will usually be significantly smaller than the number of insns of the
1813 original loop * unroll_factor (at least the induction variable increases
1814 and the exit branches will get eliminated), so it might be better to use
1815 tree_estimate_loop_size + estimated_unrolled_size. */
1816 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1817 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1818 {
1819 if (dump_file && (dump_flags & TDF_DETAILS))
1820 fprintf (dump_file,
1821 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1822 insn_to_prefetch_ratio);
1823 return true;
1824 }
1825
1826 return false;
1827 }
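/* Example with assumed values (MIN_INSN_TO_PREFETCH_RATIO is taken
   as 9 purely for illustration): unroll_factor == 4, ninsns == 20
   and prefetch_count == 10 give (4 * 20) / 10 == 8, which falls
   below that threshold, so the loop would not be prefetched.  */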
1828
1829
1830 /* Issue prefetch instructions for array references in LOOP. Returns
1831 true if the LOOP was unrolled. */
1832
1833 static bool
1834 loop_prefetch_arrays (struct loop *loop)
1835 {
1836 struct mem_ref_group *refs;
1837 unsigned ahead, ninsns, time, unroll_factor;
1838 HOST_WIDE_INT est_niter;
1839 struct tree_niter_desc desc;
1840 bool unrolled = false, no_other_refs;
1841 unsigned prefetch_count;
1842 unsigned mem_ref_count;
1843
1844 if (optimize_loop_nest_for_size_p (loop))
1845 {
1846 if (dump_file && (dump_flags & TDF_DETAILS))
1847 fprintf (dump_file, " ignored (cold area)\n");
1848 return false;
1849 }
1850
1851 /* FIXME: the time should be weighted by the probabilities of the blocks in
1852 the loop body. */
1853 time = tree_num_loop_insns (loop, &eni_time_weights);
1854 if (time == 0)
1855 return false;
1856
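/* AHEAD is the number of iterations a prefetch must be issued in
   advance to hide PREFETCH_LATENCY; the expression below is a
   ceiling division.  For instance, with an (illustrative) latency
   of 200 and a loop body costing 64 time units per iteration,
   ahead becomes (200 + 63) / 64 == 4.  */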
1857 ahead = (PREFETCH_LATENCY + time - 1) / time;
1858 est_niter = estimated_stmt_executions_int (loop);
1859 if (est_niter == -1)
1860 est_niter = max_stmt_executions_int (loop);
1861
1862 /* Prefetching is not likely to be profitable if the trip count to ahead
1863 ratio is too small. */
1864 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1865 return false;
1866
1867 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1868
1869 /* Step 1: gather the memory references. */
1870 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1871
1872 /* Give up prefetching if the number of memory references in the
1873 loop is not reasonable based on profitability and compilation time
1874 considerations. */
1875 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1876 goto fail;
1877
1878 /* Step 2: estimate the reuse effects. */
1879 prune_by_reuse (refs);
1880
1881 if (nothing_to_prefetch_p (refs))
1882 goto fail;
1883
1884 if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1885 goto fail;
1886
1887 /* Step 3: determine unroll factor. */
1888 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1889 est_niter);
1890
1891 /* Estimate prefetch count for the unrolled loop. */
1892 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1893 if (prefetch_count == 0)
1894 goto fail;
1895
1896 if (dump_file && (dump_flags & TDF_DETAILS))
1897 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1898 HOST_WIDE_INT_PRINT_DEC "\n"
1899 "insn count %d, mem ref count %d, prefetch count %d\n",
1900 ahead, unroll_factor, est_niter,
1901 ninsns, mem_ref_count, prefetch_count);
1902
1903 /* Prefetching is not likely to be profitable if the instruction to prefetch
1904 ratio is too small. */
1905 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1906 unroll_factor))
1907 goto fail;
1908
1909 mark_nontemporal_stores (loop, refs);
1910
1911 /* Step 4: what to prefetch? */
1912 if (!schedule_prefetches (refs, unroll_factor, ahead))
1913 goto fail;
1914
1915 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1916 iterations so that we do not issue superfluous prefetches. */
1917 if (unroll_factor != 1)
1918 {
1919 tree_unroll_loop (loop, unroll_factor,
1920 single_dom_exit (loop), &desc);
1921 unrolled = true;
1922 }
1923
1924 /* Step 6: issue the prefetches. */
1925 issue_prefetches (refs, unroll_factor, ahead);
1926
1927 fail:
1928 release_mem_refs (refs);
1929 return unrolled;
1930 }
1931
1932 /* Issue prefetch instructions for array references in loops. */
1933
1934 unsigned int
1935 tree_ssa_prefetch_arrays (void)
1936 {
1937 struct loop *loop;
1938 bool unrolled = false;
1939 int todo_flags = 0;
1940
1941 if (!HAVE_prefetch
1942 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1943 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1944 of the processor costs and the i486 does not have prefetch, but
1945 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1946 || PREFETCH_BLOCK == 0)
1947 return 0;
1948
1949 if (dump_file && (dump_flags & TDF_DETAILS))
1950 {
1951 fprintf (dump_file, "Prefetching parameters:\n");
1952 fprintf (dump_file, " simultaneous prefetches: %d\n",
1953 SIMULTANEOUS_PREFETCHES);
1954 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1955 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1956 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1957 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1958 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1959 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1960 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1961 MIN_INSN_TO_PREFETCH_RATIO);
1962 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1963 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1964 fprintf (dump_file, "\n");
1965 }
1966
1967 initialize_original_copy_tables ();
1968
1969 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
1970 {
1971 tree type = build_function_type_list (void_type_node,
1972 const_ptr_type_node, NULL_TREE);
1973 tree decl = add_builtin_function ("__builtin_prefetch", type,
1974 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1975 NULL, NULL_TREE);
1976 DECL_IS_NOVOPS (decl) = true;
1977 set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1978 }
1979
1980 /* We assume that the size of a cache line is a power of two, so verify this
1981 here. */
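/* (x & (x - 1)) clears the lowest set bit of x, so the assertion
   below holds exactly when PREFETCH_BLOCK has at most one bit set;
   the zero case was already excluded above.  */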
1982 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1983
1984 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1985 {
1986 if (dump_file && (dump_flags & TDF_DETAILS))
1987 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1988
1989 unrolled |= loop_prefetch_arrays (loop);
1990
1991 if (dump_file && (dump_flags & TDF_DETAILS))
1992 fprintf (dump_file, "\n\n");
1993 }
1994
1995 if (unrolled)
1996 {
1997 scev_reset ();
1998 todo_flags |= TODO_cleanup_cfg;
1999 }
2000
2001 free_original_copy_tables ();
2002 return todo_flags;
2003 }
2004
2005 /* Entry point of the prefetching pass; nothing to do unless the function contains at least one loop. */
2006
2007 static unsigned int
2008 tree_ssa_loop_prefetch (void)
2009 {
2010 if (number_of_loops (cfun) <= 1)
2011 return 0;
2012
2013 return tree_ssa_prefetch_arrays ();
2014 }
2015
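/* Gate for the pass: run only when -fprefetch-loop-arrays is enabled
   (flag_prefetch_loop_arrays > 0).  */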
2016 static bool
2017 gate_tree_ssa_loop_prefetch (void)
2018 {
2019 return flag_prefetch_loop_arrays > 0;
2020 }
2021
2022 namespace {
2023
2024 const pass_data pass_data_loop_prefetch =
2025 {
2026 GIMPLE_PASS, /* type */
2027 "aprefetch", /* name */
2028 OPTGROUP_LOOP, /* optinfo_flags */
2029 true, /* has_gate */
2030 true, /* has_execute */
2031 TV_TREE_PREFETCH, /* tv_id */
2032 ( PROP_cfg | PROP_ssa ), /* properties_required */
2033 0, /* properties_provided */
2034 0, /* properties_destroyed */
2035 0, /* todo_flags_start */
2036 0, /* todo_flags_finish */
2037 };
2038
2039 class pass_loop_prefetch : public gimple_opt_pass
2040 {
2041 public:
2042 pass_loop_prefetch (gcc::context *ctxt)
2043 : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2044 {}
2045
2046 /* opt_pass methods: */
2047 bool gate () { return gate_tree_ssa_loop_prefetch (); }
2048 unsigned int execute () { return tree_ssa_loop_prefetch (); }
2049
2050 }; // class pass_loop_prefetch
2051
2052 } // anon namespace
2053
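/* Create an instance of the loop prefetching pass for the pass
   manager.  */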
2054 gimple_opt_pass *
2055 make_pass_loop_prefetch (gcc::context *ctxt)
2056 {
2057 return new pass_loop_prefetch (ctxt);
2058 }
2059
2060